2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
63 #include "amdgpu_dm_psr.h"
65 #include "ivsrcid/ivsrcid_vislands30.h"
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
94 #include "soc15_common.h"
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
119 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
122 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
123 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
125 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
126 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
128 /* Number of bytes in PSP header for firmware. */
129 #define PSP_HEADER_BYTES 0x100
131 /* Number of bytes in PSP footer for firmware. */
132 #define PSP_FOOTER_BYTES 0x100
137 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
138 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
139 * requests into DC requests, and DC responses into DRM responses.
141 * The root control structure is &struct amdgpu_display_manager.
144 /* basic init/fini API */
145 static int amdgpu_dm_init(struct amdgpu_device *adev);
146 static void amdgpu_dm_fini(struct amdgpu_device *adev);
147 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
149 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
151 switch (link->dpcd_caps.dongle_type) {
152 case DISPLAY_DONGLE_NONE:
153 return DRM_MODE_SUBCONNECTOR_Native;
154 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
155 return DRM_MODE_SUBCONNECTOR_VGA;
156 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
157 case DISPLAY_DONGLE_DP_DVI_DONGLE:
158 return DRM_MODE_SUBCONNECTOR_DVID;
159 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
160 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
161 return DRM_MODE_SUBCONNECTOR_HDMIA;
162 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
164 return DRM_MODE_SUBCONNECTOR_Unknown;
168 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
170 struct dc_link *link = aconnector->dc_link;
171 struct drm_connector *connector = &aconnector->base;
172 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
174 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
177 if (aconnector->dc_sink)
178 subconnector = get_subconnector_type(link);
180 drm_object_property_set_value(&connector->base,
181 connector->dev->mode_config.dp_subconnector_property,
186 * initializes drm_device display related structures, based on the information
187 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
188 * drm_encoder, drm_mode_config
190 * Returns 0 on success
192 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
193 /* removes and deallocates the drm structures, created by the above function */
194 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
196 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
197 struct drm_plane *plane,
198 unsigned long possible_crtcs,
199 const struct dc_plane_cap *plane_cap);
200 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
201 struct drm_plane *plane,
202 uint32_t link_index);
203 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
204 struct amdgpu_dm_connector *amdgpu_dm_connector,
206 struct amdgpu_encoder *amdgpu_encoder);
207 static int amdgpu_dm_encoder_init(struct drm_device *dev,
208 struct amdgpu_encoder *aencoder,
209 uint32_t link_index);
211 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
213 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
215 static int amdgpu_dm_atomic_check(struct drm_device *dev,
216 struct drm_atomic_state *state);
218 static void handle_cursor_update(struct drm_plane *plane,
219 struct drm_plane_state *old_plane_state);
221 static const struct drm_format_info *
222 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
224 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
225 static void handle_hpd_rx_irq(void *param);
228 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
229 struct drm_crtc_state *new_crtc_state);
231 * dm_vblank_get_counter
234 * Get counter for number of vertical blanks
237 * struct amdgpu_device *adev - [in] desired amdgpu device
238 * int disp_idx - [in] which CRTC to get the counter from
241 * Counter for vertical blanks
243 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
245 if (crtc >= adev->mode_info.num_crtc)
248 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
250 if (acrtc->dm_irq_params.stream == NULL) {
251 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
256 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
260 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
261 u32 *vbl, u32 *position)
263 uint32_t v_blank_start, v_blank_end, h_position, v_position;
265 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
268 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
270 if (acrtc->dm_irq_params.stream == NULL) {
271 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
277 * TODO rework base driver to use values directly.
278 * for now parse it back into reg-format
280 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
286 *position = v_position | (h_position << 16);
287 *vbl = v_blank_start | (v_blank_end << 16);
293 static bool dm_is_idle(void *handle)
299 static int dm_wait_for_idle(void *handle)
305 static bool dm_check_soft_reset(void *handle)
310 static int dm_soft_reset(void *handle)
316 static struct amdgpu_crtc *
317 get_crtc_by_otg_inst(struct amdgpu_device *adev,
320 struct drm_device *dev = adev_to_drm(adev);
321 struct drm_crtc *crtc;
322 struct amdgpu_crtc *amdgpu_crtc;
324 if (WARN_ON(otg_inst == -1))
325 return adev->mode_info.crtcs[0];
327 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
328 amdgpu_crtc = to_amdgpu_crtc(crtc);
330 if (amdgpu_crtc->otg_inst == otg_inst)
337 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
339 return acrtc->dm_irq_params.freesync_config.state ==
340 VRR_STATE_ACTIVE_VARIABLE ||
341 acrtc->dm_irq_params.freesync_config.state ==
342 VRR_STATE_ACTIVE_FIXED;
345 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
347 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
348 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
351 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
352 struct dm_crtc_state *new_state)
354 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
356 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
363 * dm_pflip_high_irq() - Handle pageflip interrupt
364 * @interrupt_params: ignored
366 * Handles the pageflip interrupt by notifying all interested parties
367 * that the pageflip has been completed.
369 static void dm_pflip_high_irq(void *interrupt_params)
371 struct amdgpu_crtc *amdgpu_crtc;
372 struct common_irq_params *irq_params = interrupt_params;
373 struct amdgpu_device *adev = irq_params->adev;
375 struct drm_pending_vblank_event *e;
376 uint32_t vpos, hpos, v_blank_start, v_blank_end;
379 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
381 /* IRQ could occur when in initial stage */
382 /* TODO work and BO cleanup */
383 if (amdgpu_crtc == NULL) {
384 DC_LOG_PFLIP("CRTC is null, returning.\n");
388 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
390 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
391 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
392 amdgpu_crtc->pflip_status,
393 AMDGPU_FLIP_SUBMITTED,
394 amdgpu_crtc->crtc_id,
396 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
400 /* page flip completed. */
401 e = amdgpu_crtc->event;
402 amdgpu_crtc->event = NULL;
406 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
408 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
410 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
411 &v_blank_end, &hpos, &vpos) ||
412 (vpos < v_blank_start)) {
413 /* Update to correct count and vblank timestamp if racing with
414 * vblank irq. This also updates to the correct vblank timestamp
415 * even in VRR mode, as scanout is past the front-porch atm.
417 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
419 /* Wake up userspace by sending the pageflip event with proper
420 * count and timestamp of vblank of flip completion.
423 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
425 /* Event sent, so done with vblank for this flip */
426 drm_crtc_vblank_put(&amdgpu_crtc->base);
429 /* VRR active and inside front-porch: vblank count and
430 * timestamp for pageflip event will only be up to date after
431 * drm_crtc_handle_vblank() has been executed from late vblank
432 * irq handler after start of back-porch (vline 0). We queue the
433 * pageflip event for send-out by drm_crtc_handle_vblank() with
434 * updated timestamp and count, once it runs after us.
436 * We need to open-code this instead of using the helper
437 * drm_crtc_arm_vblank_event(), as that helper would
438 * call drm_crtc_accurate_vblank_count(), which we must
439 * not call in VRR mode while we are in front-porch!
442 /* sequence will be replaced by real count during send-out. */
443 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
444 e->pipe = amdgpu_crtc->crtc_id;
446 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
450 /* Keep track of vblank of this flip for flip throttling. We use the
451 * cooked hw counter, as that one incremented at start of this vblank
452 * of pageflip completion, so last_flip_vblank is the forbidden count
453 * for queueing new pageflips if vsync + VRR is enabled.
455 amdgpu_crtc->dm_irq_params.last_flip_vblank =
456 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
458 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
459 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
461 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
462 amdgpu_crtc->crtc_id, amdgpu_crtc,
463 vrr_active, (int) !e);
466 static void dm_vupdate_high_irq(void *interrupt_params)
468 struct common_irq_params *irq_params = interrupt_params;
469 struct amdgpu_device *adev = irq_params->adev;
470 struct amdgpu_crtc *acrtc;
471 struct drm_device *drm_dev;
472 struct drm_vblank_crtc *vblank;
473 ktime_t frame_duration_ns, previous_timestamp;
477 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
480 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
481 drm_dev = acrtc->base.dev;
482 vblank = &drm_dev->vblank[acrtc->base.index];
483 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
484 frame_duration_ns = vblank->time - previous_timestamp;
486 if (frame_duration_ns > 0) {
487 trace_amdgpu_refresh_rate_track(acrtc->base.index,
489 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
490 atomic64_set(&irq_params->previous_timestamp, vblank->time);
493 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
497 /* Core vblank handling is done here after end of front-porch in
498 * vrr mode, as vblank timestamping will give valid results
499 * while now done after front-porch. This will also deliver
500 * page-flip completion events that have been queued to us
501 * if a pageflip happened inside front-porch.
504 drm_crtc_handle_vblank(&acrtc->base);
506 /* BTR processing for pre-DCE12 ASICs */
507 if (acrtc->dm_irq_params.stream &&
508 adev->family < AMDGPU_FAMILY_AI) {
509 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
510 mod_freesync_handle_v_update(
511 adev->dm.freesync_module,
512 acrtc->dm_irq_params.stream,
513 &acrtc->dm_irq_params.vrr_params);
515 dc_stream_adjust_vmin_vmax(
517 acrtc->dm_irq_params.stream,
518 &acrtc->dm_irq_params.vrr_params.adjust);
519 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
526 * dm_crtc_high_irq() - Handles CRTC interrupt
527 * @interrupt_params: used for determining the CRTC instance
529 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
532 static void dm_crtc_high_irq(void *interrupt_params)
534 struct common_irq_params *irq_params = interrupt_params;
535 struct amdgpu_device *adev = irq_params->adev;
536 struct amdgpu_crtc *acrtc;
540 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
544 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
546 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
547 vrr_active, acrtc->dm_irq_params.active_planes);
550 * Core vblank handling at start of front-porch is only possible
551 * in non-vrr mode, as only there vblank timestamping will give
552 * valid results while done in front-porch. Otherwise defer it
553 * to dm_vupdate_high_irq after end of front-porch.
556 drm_crtc_handle_vblank(&acrtc->base);
559 * Following stuff must happen at start of vblank, for crc
560 * computation and below-the-range btr support in vrr mode.
562 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
564 /* BTR updates need to happen before VUPDATE on Vega and above. */
565 if (adev->family < AMDGPU_FAMILY_AI)
568 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
570 if (acrtc->dm_irq_params.stream &&
571 acrtc->dm_irq_params.vrr_params.supported &&
572 acrtc->dm_irq_params.freesync_config.state ==
573 VRR_STATE_ACTIVE_VARIABLE) {
574 mod_freesync_handle_v_update(adev->dm.freesync_module,
575 acrtc->dm_irq_params.stream,
576 &acrtc->dm_irq_params.vrr_params);
578 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
579 &acrtc->dm_irq_params.vrr_params.adjust);
583 * If there aren't any active_planes then DCH HUBP may be clock-gated.
584 * In that case, pageflip completion interrupts won't fire and pageflip
585 * completion events won't get delivered. Prevent this by sending
586 * pending pageflip events from here if a flip is still pending.
588 * If any planes are enabled, use dm_pflip_high_irq() instead, to
589 * avoid race conditions between flip programming and completion,
590 * which could cause too early flip completion events.
592 if (adev->family >= AMDGPU_FAMILY_RV &&
593 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
594 acrtc->dm_irq_params.active_planes == 0) {
596 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
598 drm_crtc_vblank_put(&acrtc->base);
600 acrtc->pflip_status = AMDGPU_FLIP_NONE;
603 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
606 #if defined(CONFIG_DRM_AMD_DC_DCN)
607 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
609 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
610 * DCN generation ASICs
611 * @interrupt_params: interrupt parameters
613 * Used to set crc window/read out crc value at vertical line 0 position
615 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
617 struct common_irq_params *irq_params = interrupt_params;
618 struct amdgpu_device *adev = irq_params->adev;
619 struct amdgpu_crtc *acrtc;
621 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
626 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
628 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
631 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
632 * @adev: amdgpu_device pointer
633 * @notify: dmub notification structure
635 * Dmub AUX or SET_CONFIG command completion processing callback
636 * Copies dmub notification to DM which is to be read by AUX command.
637 * issuing thread and also signals the event to wake up the thread.
639 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
640 struct dmub_notification *notify)
642 if (adev->dm.dmub_notify)
643 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
644 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
645 complete(&adev->dm.dmub_aux_transfer_done);
649 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
650 * @adev: amdgpu_device pointer
651 * @notify: dmub notification structure
653 * Dmub Hpd interrupt processing callback. Gets displayindex through the
654 * ink index and calls helper to do the processing.
656 static void dmub_hpd_callback(struct amdgpu_device *adev,
657 struct dmub_notification *notify)
659 struct amdgpu_dm_connector *aconnector;
660 struct amdgpu_dm_connector *hpd_aconnector = NULL;
661 struct drm_connector *connector;
662 struct drm_connector_list_iter iter;
663 struct dc_link *link;
664 uint8_t link_index = 0;
665 struct drm_device *dev;
670 if (notify == NULL) {
671 DRM_ERROR("DMUB HPD callback notification was NULL");
675 if (notify->link_index > adev->dm.dc->link_count) {
676 DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
680 link_index = notify->link_index;
681 link = adev->dm.dc->links[link_index];
684 drm_connector_list_iter_begin(dev, &iter);
685 drm_for_each_connector_iter(connector, &iter) {
686 aconnector = to_amdgpu_dm_connector(connector);
687 if (link && aconnector->dc_link == link) {
688 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
689 hpd_aconnector = aconnector;
693 drm_connector_list_iter_end(&iter);
695 if (hpd_aconnector) {
696 if (notify->type == DMUB_NOTIFICATION_HPD)
697 handle_hpd_irq_helper(hpd_aconnector);
698 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
699 handle_hpd_rx_irq(hpd_aconnector);
704 * register_dmub_notify_callback - Sets callback for DMUB notify
705 * @adev: amdgpu_device pointer
706 * @type: Type of dmub notification
707 * @callback: Dmub interrupt callback function
708 * @dmub_int_thread_offload: offload indicator
710 * API to register a dmub callback handler for a dmub notification
711 * Also sets indicator whether callback processing to be offloaded.
712 * to dmub interrupt handling thread
713 * Return: true if successfully registered, false if there is existing registration
715 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
716 enum dmub_notification_type type,
717 dmub_notify_interrupt_callback_t callback,
718 bool dmub_int_thread_offload)
720 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
721 adev->dm.dmub_callback[type] = callback;
722 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
729 static void dm_handle_hpd_work(struct work_struct *work)
731 struct dmub_hpd_work *dmub_hpd_wrk;
733 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
735 if (!dmub_hpd_wrk->dmub_notify) {
736 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
740 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
741 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
742 dmub_hpd_wrk->dmub_notify);
745 kfree(dmub_hpd_wrk->dmub_notify);
750 #define DMUB_TRACE_MAX_READ 64
752 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
753 * @interrupt_params: used for determining the Outbox instance
755 * Handles the Outbox Interrupt
758 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
760 struct dmub_notification notify;
761 struct common_irq_params *irq_params = interrupt_params;
762 struct amdgpu_device *adev = irq_params->adev;
763 struct amdgpu_display_manager *dm = &adev->dm;
764 struct dmcub_trace_buf_entry entry = { 0 };
766 struct dmub_hpd_work *dmub_hpd_wrk;
767 struct dc_link *plink = NULL;
769 if (dc_enable_dmub_notifications(adev->dm.dc) &&
770 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
773 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
774 if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
775 DRM_ERROR("DM: notify type %d invalid!", notify.type);
778 if (!dm->dmub_callback[notify.type]) {
779 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
782 if (dm->dmub_thread_offload[notify.type] == true) {
783 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
785 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
788 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
789 if (!dmub_hpd_wrk->dmub_notify) {
791 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
794 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
795 if (dmub_hpd_wrk->dmub_notify)
796 memcpy(dmub_hpd_wrk->dmub_notify, ¬ify, sizeof(struct dmub_notification));
797 dmub_hpd_wrk->adev = adev;
798 if (notify.type == DMUB_NOTIFICATION_HPD) {
799 plink = adev->dm.dc->links[notify.link_index];
802 notify.hpd_status == DP_HPD_PLUG;
805 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
807 dm->dmub_callback[notify.type](adev, ¬ify);
809 } while (notify.pending_notification);
814 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
815 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
816 entry.param0, entry.param1);
818 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
819 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
825 } while (count <= DMUB_TRACE_MAX_READ);
827 if (count > DMUB_TRACE_MAX_READ)
828 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
830 #endif /* CONFIG_DRM_AMD_DC_DCN */
832 static int dm_set_clockgating_state(void *handle,
833 enum amd_clockgating_state state)
838 static int dm_set_powergating_state(void *handle,
839 enum amd_powergating_state state)
844 /* Prototypes of private functions */
845 static int dm_early_init(void* handle);
847 /* Allocate memory for FBC compressed data */
848 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
850 struct drm_device *dev = connector->dev;
851 struct amdgpu_device *adev = drm_to_adev(dev);
852 struct dm_compressor_info *compressor = &adev->dm.compressor;
853 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
854 struct drm_display_mode *mode;
855 unsigned long max_size = 0;
857 if (adev->dm.dc->fbc_compressor == NULL)
860 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
863 if (compressor->bo_ptr)
867 list_for_each_entry(mode, &connector->modes, head) {
868 if (max_size < mode->htotal * mode->vtotal)
869 max_size = mode->htotal * mode->vtotal;
873 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
874 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
875 &compressor->gpu_addr, &compressor->cpu_addr);
878 DRM_ERROR("DM: Failed to initialize FBC\n");
880 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
881 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
888 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
889 int pipe, bool *enabled,
890 unsigned char *buf, int max_bytes)
892 struct drm_device *dev = dev_get_drvdata(kdev);
893 struct amdgpu_device *adev = drm_to_adev(dev);
894 struct drm_connector *connector;
895 struct drm_connector_list_iter conn_iter;
896 struct amdgpu_dm_connector *aconnector;
901 mutex_lock(&adev->dm.audio_lock);
903 drm_connector_list_iter_begin(dev, &conn_iter);
904 drm_for_each_connector_iter(connector, &conn_iter) {
905 aconnector = to_amdgpu_dm_connector(connector);
906 if (aconnector->audio_inst != port)
910 ret = drm_eld_size(connector->eld);
911 memcpy(buf, connector->eld, min(max_bytes, ret));
915 drm_connector_list_iter_end(&conn_iter);
917 mutex_unlock(&adev->dm.audio_lock);
919 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
924 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
925 .get_eld = amdgpu_dm_audio_component_get_eld,
928 static int amdgpu_dm_audio_component_bind(struct device *kdev,
929 struct device *hda_kdev, void *data)
931 struct drm_device *dev = dev_get_drvdata(kdev);
932 struct amdgpu_device *adev = drm_to_adev(dev);
933 struct drm_audio_component *acomp = data;
935 acomp->ops = &amdgpu_dm_audio_component_ops;
937 adev->dm.audio_component = acomp;
942 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
943 struct device *hda_kdev, void *data)
945 struct drm_device *dev = dev_get_drvdata(kdev);
946 struct amdgpu_device *adev = drm_to_adev(dev);
947 struct drm_audio_component *acomp = data;
951 adev->dm.audio_component = NULL;
954 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
955 .bind = amdgpu_dm_audio_component_bind,
956 .unbind = amdgpu_dm_audio_component_unbind,
959 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
966 adev->mode_info.audio.enabled = true;
968 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
970 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
971 adev->mode_info.audio.pin[i].channels = -1;
972 adev->mode_info.audio.pin[i].rate = -1;
973 adev->mode_info.audio.pin[i].bits_per_sample = -1;
974 adev->mode_info.audio.pin[i].status_bits = 0;
975 adev->mode_info.audio.pin[i].category_code = 0;
976 adev->mode_info.audio.pin[i].connected = false;
977 adev->mode_info.audio.pin[i].id =
978 adev->dm.dc->res_pool->audios[i]->inst;
979 adev->mode_info.audio.pin[i].offset = 0;
982 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
986 adev->dm.audio_registered = true;
991 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
996 if (!adev->mode_info.audio.enabled)
999 if (adev->dm.audio_registered) {
1000 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1001 adev->dm.audio_registered = false;
1004 /* TODO: Disable audio? */
1006 adev->mode_info.audio.enabled = false;
1009 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1011 struct drm_audio_component *acomp = adev->dm.audio_component;
1013 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1014 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1016 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1021 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1023 const struct dmcub_firmware_header_v1_0 *hdr;
1024 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1025 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1026 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1027 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1028 struct abm *abm = adev->dm.dc->res_pool->abm;
1029 struct dmub_srv_hw_params hw_params;
1030 enum dmub_status status;
1031 const unsigned char *fw_inst_const, *fw_bss_data;
1032 uint32_t i, fw_inst_const_size, fw_bss_data_size;
1033 bool has_hw_support;
1036 /* DMUB isn't supported on the ASIC. */
1040 DRM_ERROR("No framebuffer info for DMUB service.\n");
1045 /* Firmware required for DMUB support. */
1046 DRM_ERROR("No firmware provided for DMUB.\n");
1050 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1051 if (status != DMUB_STATUS_OK) {
1052 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1056 if (!has_hw_support) {
1057 DRM_INFO("DMUB unsupported on ASIC\n");
1061 /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1062 status = dmub_srv_hw_reset(dmub_srv);
1063 if (status != DMUB_STATUS_OK)
1064 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1066 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1068 fw_inst_const = dmub_fw->data +
1069 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1072 fw_bss_data = dmub_fw->data +
1073 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1074 le32_to_cpu(hdr->inst_const_bytes);
1076 /* Copy firmware and bios info into FB memory. */
1077 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1078 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1080 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1082 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1083 * amdgpu_ucode_init_single_fw will load dmub firmware
1084 * fw_inst_const part to cw0; otherwise, the firmware back door load
1085 * will be done by dm_dmub_hw_init
1087 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1088 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1089 fw_inst_const_size);
1092 if (fw_bss_data_size)
1093 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1094 fw_bss_data, fw_bss_data_size);
1096 /* Copy firmware bios info into FB memory. */
1097 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1100 /* Reset regions that need to be reset. */
1101 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1102 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1104 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1105 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1107 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1108 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1110 /* Initialize hardware. */
1111 memset(&hw_params, 0, sizeof(hw_params));
1112 hw_params.fb_base = adev->gmc.fb_start;
1113 hw_params.fb_offset = adev->gmc.aper_base;
1115 /* backdoor load firmware and trigger dmub running */
1116 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1117 hw_params.load_inst_const = true;
1120 hw_params.psp_version = dmcu->psp_version;
1122 for (i = 0; i < fb_info->num_fb; ++i)
1123 hw_params.fb[i] = &fb_info->fb[i];
1125 switch (adev->ip_versions[DCE_HWIP][0]) {
1126 case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1127 hw_params.dpia_supported = true;
1128 #if defined(CONFIG_DRM_AMD_DC_DCN)
1129 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1136 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1137 if (status != DMUB_STATUS_OK) {
1138 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1142 /* Wait for firmware load to finish. */
1143 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1144 if (status != DMUB_STATUS_OK)
1145 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1147 /* Init DMCU and ABM if available. */
1149 dmcu->funcs->dmcu_init(dmcu);
1150 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1153 if (!adev->dm.dc->ctx->dmub_srv)
1154 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1155 if (!adev->dm.dc->ctx->dmub_srv) {
1156 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1160 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1161 adev->dm.dmcub_fw_version);
1166 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1168 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1169 enum dmub_status status;
1173 /* DMUB isn't supported on the ASIC. */
1177 status = dmub_srv_is_hw_init(dmub_srv, &init);
1178 if (status != DMUB_STATUS_OK)
1179 DRM_WARN("DMUB hardware init check failed: %d\n", status);
1181 if (status == DMUB_STATUS_OK && init) {
1182 /* Wait for firmware load to finish. */
1183 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1184 if (status != DMUB_STATUS_OK)
1185 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1187 /* Perform the full hardware initialization. */
1188 dm_dmub_hw_init(adev);
1192 #if defined(CONFIG_DRM_AMD_DC_DCN)
1193 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1196 uint32_t logical_addr_low;
1197 uint32_t logical_addr_high;
1198 uint32_t agp_base, agp_bot, agp_top;
1199 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1201 memset(pa_config, 0, sizeof(*pa_config));
1203 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1204 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1206 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1208 * Raven2 has a HW issue that it is unable to use the vram which
1209 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1210 * workaround that increase system aperture high address (add 1)
1211 * to get rid of the VM fault and hardware hang.
1213 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1215 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1218 agp_bot = adev->gmc.agp_start >> 24;
1219 agp_top = adev->gmc.agp_end >> 24;
1222 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1223 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1224 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1225 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1226 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1227 page_table_base.low_part = lower_32_bits(pt_base);
1229 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1230 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1232 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1233 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1234 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1236 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1237 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1238 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1240 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1241 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1242 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1244 pa_config->is_hvm_enabled = 0;
1248 #if defined(CONFIG_DRM_AMD_DC_DCN)
1249 static void vblank_control_worker(struct work_struct *work)
1251 struct vblank_control_work *vblank_work =
1252 container_of(work, struct vblank_control_work, work);
1253 struct amdgpu_display_manager *dm = vblank_work->dm;
1255 mutex_lock(&dm->dc_lock);
1257 if (vblank_work->enable)
1258 dm->active_vblank_irq_count++;
1259 else if(dm->active_vblank_irq_count)
1260 dm->active_vblank_irq_count--;
1262 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1264 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1266 /* Control PSR based on vblank requirements from OS */
1267 if (vblank_work->stream && vblank_work->stream->link) {
1268 if (vblank_work->enable) {
1269 if (vblank_work->stream->link->psr_settings.psr_allow_active)
1270 amdgpu_dm_psr_disable(vblank_work->stream);
1271 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1272 !vblank_work->stream->link->psr_settings.psr_allow_active &&
1273 vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1274 amdgpu_dm_psr_enable(vblank_work->stream);
1278 mutex_unlock(&dm->dc_lock);
1280 dc_stream_release(vblank_work->stream);
1287 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1289 struct hpd_rx_irq_offload_work *offload_work;
1290 struct amdgpu_dm_connector *aconnector;
1291 struct dc_link *dc_link;
1292 struct amdgpu_device *adev;
1293 enum dc_connection_type new_connection_type = dc_connection_none;
1294 unsigned long flags;
1296 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1297 aconnector = offload_work->offload_wq->aconnector;
1300 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1304 adev = drm_to_adev(aconnector->base.dev);
1305 dc_link = aconnector->dc_link;
1307 mutex_lock(&aconnector->hpd_lock);
1308 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1309 DRM_ERROR("KMS: Failed to detect connector\n");
1310 mutex_unlock(&aconnector->hpd_lock);
1312 if (new_connection_type == dc_connection_none)
1315 if (amdgpu_in_reset(adev))
1318 mutex_lock(&adev->dm.dc_lock);
1319 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1320 dc_link_dp_handle_automated_test(dc_link);
1321 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1322 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1323 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1324 dc_link_dp_handle_link_loss(dc_link);
1325 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1326 offload_work->offload_wq->is_handling_link_loss = false;
1327 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1329 mutex_unlock(&adev->dm.dc_lock);
1332 kfree(offload_work);
1336 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1338 int max_caps = dc->caps.max_links;
1340 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1342 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1344 if (!hpd_rx_offload_wq)
1348 for (i = 0; i < max_caps; i++) {
1349 hpd_rx_offload_wq[i].wq =
1350 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1352 if (hpd_rx_offload_wq[i].wq == NULL) {
1353 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1357 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1360 return hpd_rx_offload_wq;
1363 struct amdgpu_stutter_quirk {
1371 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1372 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1373 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1377 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1379 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1381 while (p && p->chip_device != 0) {
1382 if (pdev->vendor == p->chip_vendor &&
1383 pdev->device == p->chip_device &&
1384 pdev->subsystem_vendor == p->subsys_vendor &&
1385 pdev->subsystem_device == p->subsys_device &&
1386 pdev->revision == p->revision) {
1394 static int amdgpu_dm_init(struct amdgpu_device *adev)
1396 struct dc_init_data init_data;
1397 #ifdef CONFIG_DRM_AMD_DC_HDCP
1398 struct dc_callback_init init_params;
1402 adev->dm.ddev = adev_to_drm(adev);
1403 adev->dm.adev = adev;
1405 /* Zero all the fields */
1406 memset(&init_data, 0, sizeof(init_data));
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408 memset(&init_params, 0, sizeof(init_params));
1411 mutex_init(&adev->dm.dc_lock);
1412 mutex_init(&adev->dm.audio_lock);
1413 #if defined(CONFIG_DRM_AMD_DC_DCN)
1414 spin_lock_init(&adev->dm.vblank_lock);
1417 if(amdgpu_dm_irq_init(adev)) {
1418 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1422 init_data.asic_id.chip_family = adev->family;
1424 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1425 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1426 init_data.asic_id.chip_id = adev->pdev->device;
1428 init_data.asic_id.vram_width = adev->gmc.vram_width;
1429 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1430 init_data.asic_id.atombios_base_address =
1431 adev->mode_info.atom_context->bios;
1433 init_data.driver = adev;
1435 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1437 if (!adev->dm.cgs_device) {
1438 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1442 init_data.cgs_device = adev->dm.cgs_device;
1444 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1446 switch (adev->ip_versions[DCE_HWIP][0]) {
1447 case IP_VERSION(2, 1, 0):
1448 switch (adev->dm.dmcub_fw_version) {
1449 case 0: /* development */
1450 case 0x1: /* linux-firmware.git hash 6d9f399 */
1451 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1452 init_data.flags.disable_dmcu = false;
1455 init_data.flags.disable_dmcu = true;
1458 case IP_VERSION(2, 0, 3):
1459 init_data.flags.disable_dmcu = true;
1465 switch (adev->asic_type) {
1468 init_data.flags.gpu_vm_support = true;
1471 switch (adev->ip_versions[DCE_HWIP][0]) {
1472 case IP_VERSION(1, 0, 0):
1473 case IP_VERSION(1, 0, 1):
1474 /* enable S/G on PCO and RV2 */
1475 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1476 (adev->apu_flags & AMD_APU_IS_PICASSO))
1477 init_data.flags.gpu_vm_support = true;
1479 case IP_VERSION(2, 1, 0):
1480 case IP_VERSION(3, 0, 1):
1481 case IP_VERSION(3, 1, 2):
1482 case IP_VERSION(3, 1, 3):
1483 case IP_VERSION(3, 1, 5):
1484 init_data.flags.gpu_vm_support = true;
1492 if (init_data.flags.gpu_vm_support)
1493 adev->mode_info.gpu_vm_support = true;
1495 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1496 init_data.flags.fbc_support = true;
1498 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1499 init_data.flags.multi_mon_pp_mclk_switch = true;
1501 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1502 init_data.flags.disable_fractional_pwm = true;
1504 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1505 init_data.flags.edp_no_power_sequencing = true;
1507 #ifdef CONFIG_DRM_AMD_DC_DCN
1508 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1509 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1510 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1511 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1514 init_data.flags.seamless_boot_edp_requested = false;
1516 if (check_seamless_boot_capability(adev)) {
1517 init_data.flags.seamless_boot_edp_requested = true;
1518 init_data.flags.allow_seamless_boot_optimization = true;
1519 DRM_INFO("Seamless boot condition check passed\n");
1522 INIT_LIST_HEAD(&adev->dm.da_list);
1523 /* Display Core create. */
1524 adev->dm.dc = dc_create(&init_data);
1527 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1529 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1533 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1534 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1535 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1538 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1539 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1540 if (dm_should_disable_stutter(adev->pdev))
1541 adev->dm.dc->debug.disable_stutter = true;
1543 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1544 adev->dm.dc->debug.disable_stutter = true;
1546 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1547 adev->dm.dc->debug.disable_dsc = true;
1548 adev->dm.dc->debug.disable_dsc_edp = true;
1551 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1552 adev->dm.dc->debug.disable_clock_gate = true;
1554 r = dm_dmub_hw_init(adev);
1556 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1560 dc_hardware_init(adev->dm.dc);
1562 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1563 if (!adev->dm.hpd_rx_offload_wq) {
1564 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1568 #if defined(CONFIG_DRM_AMD_DC_DCN)
1569 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1570 struct dc_phy_addr_space_config pa_config;
1572 mmhub_read_system_context(adev, &pa_config);
1574 // Call the DC init_memory func
1575 dc_setup_system_context(adev->dm.dc, &pa_config);
1579 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1580 if (!adev->dm.freesync_module) {
1582 "amdgpu: failed to initialize freesync_module.\n");
1584 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1585 adev->dm.freesync_module);
1587 amdgpu_dm_init_color_mod();
1589 #if defined(CONFIG_DRM_AMD_DC_DCN)
1590 if (adev->dm.dc->caps.max_links > 0) {
1591 adev->dm.vblank_control_workqueue =
1592 create_singlethread_workqueue("dm_vblank_control_workqueue");
1593 if (!adev->dm.vblank_control_workqueue)
1594 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1598 #ifdef CONFIG_DRM_AMD_DC_HDCP
1599 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1600 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1602 if (!adev->dm.hdcp_workqueue)
1603 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1605 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1607 dc_init_callbacks(adev->dm.dc, &init_params);
1610 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1611 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1613 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1614 init_completion(&adev->dm.dmub_aux_transfer_done);
1615 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1616 if (!adev->dm.dmub_notify) {
1617 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1621 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1622 if (!adev->dm.delayed_hpd_wq) {
1623 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1627 amdgpu_dm_outbox_init(adev);
1628 #if defined(CONFIG_DRM_AMD_DC_DCN)
1629 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1630 dmub_aux_setconfig_callback, false)) {
1631 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1634 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1635 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1638 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1639 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1642 #endif /* CONFIG_DRM_AMD_DC_DCN */
1645 if (amdgpu_dm_initialize_drm_device(adev)) {
1647 "amdgpu: failed to initialize sw for display support.\n");
1651 /* create fake encoders for MST */
1652 dm_dp_create_fake_mst_encoders(adev);
1654 /* TODO: Add_display_info? */
1656 /* TODO use dynamic cursor width */
1657 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1658 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1660 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1662 "amdgpu: failed to initialize sw for display support.\n");
1667 DRM_DEBUG_DRIVER("KMS initialized.\n");
1671 amdgpu_dm_fini(adev);
1676 static int amdgpu_dm_early_fini(void *handle)
1678 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1680 amdgpu_dm_audio_fini(adev);
1685 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1689 #if defined(CONFIG_DRM_AMD_DC_DCN)
1690 if (adev->dm.vblank_control_workqueue) {
1691 destroy_workqueue(adev->dm.vblank_control_workqueue);
1692 adev->dm.vblank_control_workqueue = NULL;
1696 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1697 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1700 amdgpu_dm_destroy_drm_device(&adev->dm);
1702 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1703 if (adev->dm.crc_rd_wrk) {
1704 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1705 kfree(adev->dm.crc_rd_wrk);
1706 adev->dm.crc_rd_wrk = NULL;
1709 #ifdef CONFIG_DRM_AMD_DC_HDCP
1710 if (adev->dm.hdcp_workqueue) {
1711 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1712 adev->dm.hdcp_workqueue = NULL;
1716 dc_deinit_callbacks(adev->dm.dc);
1719 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1721 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1722 kfree(adev->dm.dmub_notify);
1723 adev->dm.dmub_notify = NULL;
1724 destroy_workqueue(adev->dm.delayed_hpd_wq);
1725 adev->dm.delayed_hpd_wq = NULL;
1728 if (adev->dm.dmub_bo)
1729 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1730 &adev->dm.dmub_bo_gpu_addr,
1731 &adev->dm.dmub_bo_cpu_addr);
1733 if (adev->dm.hpd_rx_offload_wq) {
1734 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1735 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1736 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1737 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1741 kfree(adev->dm.hpd_rx_offload_wq);
1742 adev->dm.hpd_rx_offload_wq = NULL;
1745 /* DC Destroy TODO: Replace destroy DAL */
1747 dc_destroy(&adev->dm.dc);
1749 * TODO: pageflip, vlank interrupt
1751 * amdgpu_dm_irq_fini(adev);
1754 if (adev->dm.cgs_device) {
1755 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1756 adev->dm.cgs_device = NULL;
1758 if (adev->dm.freesync_module) {
1759 mod_freesync_destroy(adev->dm.freesync_module);
1760 adev->dm.freesync_module = NULL;
1763 mutex_destroy(&adev->dm.audio_lock);
1764 mutex_destroy(&adev->dm.dc_lock);
1769 static int load_dmcu_fw(struct amdgpu_device *adev)
1771 const char *fw_name_dmcu = NULL;
1773 const struct dmcu_firmware_header_v1_0 *hdr;
1775 switch(adev->asic_type) {
1776 #if defined(CONFIG_DRM_AMD_DC_SI)
1791 case CHIP_POLARIS11:
1792 case CHIP_POLARIS10:
1793 case CHIP_POLARIS12:
1800 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1803 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1804 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1805 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1806 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1811 switch (adev->ip_versions[DCE_HWIP][0]) {
1812 case IP_VERSION(2, 0, 2):
1813 case IP_VERSION(2, 0, 3):
1814 case IP_VERSION(2, 0, 0):
1815 case IP_VERSION(2, 1, 0):
1816 case IP_VERSION(3, 0, 0):
1817 case IP_VERSION(3, 0, 2):
1818 case IP_VERSION(3, 0, 3):
1819 case IP_VERSION(3, 0, 1):
1820 case IP_VERSION(3, 1, 2):
1821 case IP_VERSION(3, 1, 3):
1822 case IP_VERSION(3, 1, 5):
1823 case IP_VERSION(3, 1, 6):
1828 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1832 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1833 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1837 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1839 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1840 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1841 adev->dm.fw_dmcu = NULL;
1845 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1850 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1852 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1854 release_firmware(adev->dm.fw_dmcu);
1855 adev->dm.fw_dmcu = NULL;
1859 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1860 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1861 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1862 adev->firmware.fw_size +=
1863 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1865 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1866 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1867 adev->firmware.fw_size +=
1868 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1870 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1872 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1877 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1879 struct amdgpu_device *adev = ctx;
1881 return dm_read_reg(adev->dm.dc->ctx, address);
1884 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1887 struct amdgpu_device *adev = ctx;
1889 return dm_write_reg(adev->dm.dc->ctx, address, value);
1892 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1894 struct dmub_srv_create_params create_params;
1895 struct dmub_srv_region_params region_params;
1896 struct dmub_srv_region_info region_info;
1897 struct dmub_srv_fb_params fb_params;
1898 struct dmub_srv_fb_info *fb_info;
1899 struct dmub_srv *dmub_srv;
1900 const struct dmcub_firmware_header_v1_0 *hdr;
1901 const char *fw_name_dmub;
1902 enum dmub_asic dmub_asic;
1903 enum dmub_status status;
1906 switch (adev->ip_versions[DCE_HWIP][0]) {
1907 case IP_VERSION(2, 1, 0):
1908 dmub_asic = DMUB_ASIC_DCN21;
1909 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1910 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1911 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1913 case IP_VERSION(3, 0, 0):
1914 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1915 dmub_asic = DMUB_ASIC_DCN30;
1916 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1918 dmub_asic = DMUB_ASIC_DCN30;
1919 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1922 case IP_VERSION(3, 0, 1):
1923 dmub_asic = DMUB_ASIC_DCN301;
1924 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1926 case IP_VERSION(3, 0, 2):
1927 dmub_asic = DMUB_ASIC_DCN302;
1928 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1930 case IP_VERSION(3, 0, 3):
1931 dmub_asic = DMUB_ASIC_DCN303;
1932 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1934 case IP_VERSION(3, 1, 2):
1935 case IP_VERSION(3, 1, 3):
1936 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1937 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1939 case IP_VERSION(3, 1, 5):
1940 dmub_asic = DMUB_ASIC_DCN315;
1941 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1943 case IP_VERSION(3, 1, 6):
1944 dmub_asic = DMUB_ASIC_DCN316;
1945 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1948 /* ASIC doesn't support DMUB. */
1952 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1954 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1958 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1960 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1964 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1965 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1967 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1968 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1969 AMDGPU_UCODE_ID_DMCUB;
1970 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1972 adev->firmware.fw_size +=
1973 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1975 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1976 adev->dm.dmcub_fw_version);
1980 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1981 dmub_srv = adev->dm.dmub_srv;
1984 DRM_ERROR("Failed to allocate DMUB service!\n");
1988 memset(&create_params, 0, sizeof(create_params));
1989 create_params.user_ctx = adev;
1990 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1991 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1992 create_params.asic = dmub_asic;
1994 /* Create the DMUB service. */
1995 status = dmub_srv_create(dmub_srv, &create_params);
1996 if (status != DMUB_STATUS_OK) {
1997 DRM_ERROR("Error creating DMUB service: %d\n", status);
2001 /* Calculate the size of all the regions for the DMUB service. */
2002 memset(®ion_params, 0, sizeof(region_params));
2004 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2005 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2006 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2007 region_params.vbios_size = adev->bios_size;
2008 region_params.fw_bss_data = region_params.bss_data_size ?
2009 adev->dm.dmub_fw->data +
2010 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2011 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2012 region_params.fw_inst_const =
2013 adev->dm.dmub_fw->data +
2014 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2017 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
2020 if (status != DMUB_STATUS_OK) {
2021 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2026 * Allocate a framebuffer based on the total size of all the regions.
2027 * TODO: Move this into GART.
2029 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2030 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2031 &adev->dm.dmub_bo_gpu_addr,
2032 &adev->dm.dmub_bo_cpu_addr);
2036 /* Rebase the regions on the framebuffer address. */
2037 memset(&fb_params, 0, sizeof(fb_params));
2038 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2039 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2040 fb_params.region_info = ®ion_info;
2042 adev->dm.dmub_fb_info =
2043 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2044 fb_info = adev->dm.dmub_fb_info;
2048 "Failed to allocate framebuffer info for DMUB service!\n");
2052 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2053 if (status != DMUB_STATUS_OK) {
2054 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2061 static int dm_sw_init(void *handle)
2063 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2066 r = dm_dmub_sw_init(adev);
2070 return load_dmcu_fw(adev);
2073 static int dm_sw_fini(void *handle)
2075 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2077 kfree(adev->dm.dmub_fb_info);
2078 adev->dm.dmub_fb_info = NULL;
2080 if (adev->dm.dmub_srv) {
2081 dmub_srv_destroy(adev->dm.dmub_srv);
2082 adev->dm.dmub_srv = NULL;
2085 release_firmware(adev->dm.dmub_fw);
2086 adev->dm.dmub_fw = NULL;
2088 release_firmware(adev->dm.fw_dmcu);
2089 adev->dm.fw_dmcu = NULL;
2094 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2096 struct amdgpu_dm_connector *aconnector;
2097 struct drm_connector *connector;
2098 struct drm_connector_list_iter iter;
2101 drm_connector_list_iter_begin(dev, &iter);
2102 drm_for_each_connector_iter(connector, &iter) {
2103 aconnector = to_amdgpu_dm_connector(connector);
2104 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2105 aconnector->mst_mgr.aux) {
2106 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2108 aconnector->base.base.id);
2110 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2112 DRM_ERROR("DM_MST: Failed to start MST\n");
2113 aconnector->dc_link->type =
2114 dc_connection_single;
2119 drm_connector_list_iter_end(&iter);
2124 static int dm_late_init(void *handle)
2126 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2128 struct dmcu_iram_parameters params;
2129 unsigned int linear_lut[16];
2131 struct dmcu *dmcu = NULL;
2133 dmcu = adev->dm.dc->res_pool->dmcu;
2135 for (i = 0; i < 16; i++)
2136 linear_lut[i] = 0xFFFF * i / 15;
2139 params.backlight_ramping_override = false;
2140 params.backlight_ramping_start = 0xCCCC;
2141 params.backlight_ramping_reduction = 0xCCCCCCCC;
2142 params.backlight_lut_array_size = 16;
2143 params.backlight_lut_array = linear_lut;
2145 /* Min backlight level after ABM reduction, Don't allow below 1%
2146 * 0xFFFF x 0.01 = 0x28F
2148 params.min_abm_backlight = 0x28F;
2149 /* In the case where abm is implemented on dmcub,
2150 * dmcu object will be null.
2151 * ABM 2.4 and up are implemented on dmcub.
2154 if (!dmcu_load_iram(dmcu, params))
2156 } else if (adev->dm.dc->ctx->dmub_srv) {
2157 struct dc_link *edp_links[MAX_NUM_EDP];
2160 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2161 for (i = 0; i < edp_num; i++) {
2162 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2167 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2170 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2172 struct amdgpu_dm_connector *aconnector;
2173 struct drm_connector *connector;
2174 struct drm_connector_list_iter iter;
2175 struct drm_dp_mst_topology_mgr *mgr;
2177 bool need_hotplug = false;
2179 drm_connector_list_iter_begin(dev, &iter);
2180 drm_for_each_connector_iter(connector, &iter) {
2181 aconnector = to_amdgpu_dm_connector(connector);
2182 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2183 aconnector->mst_port)
2186 mgr = &aconnector->mst_mgr;
2189 drm_dp_mst_topology_mgr_suspend(mgr);
2191 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2193 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2194 need_hotplug = true;
2198 drm_connector_list_iter_end(&iter);
2201 drm_kms_helper_hotplug_event(dev);
2204 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2208 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2209 * on window driver dc implementation.
2210 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2211 * should be passed to smu during boot up and resume from s3.
2212 * boot up: dc calculate dcn watermark clock settings within dc_create,
2213 * dcn20_resource_construct
2214 * then call pplib functions below to pass the settings to smu:
2215 * smu_set_watermarks_for_clock_ranges
2216 * smu_set_watermarks_table
2217 * navi10_set_watermarks_table
2218 * smu_write_watermarks_table
2220 * For Renoir, clock settings of dcn watermark are also fixed values.
2221 * dc has implemented different flow for window driver:
2222 * dc_hardware_init / dc_set_power_state
2227 * smu_set_watermarks_for_clock_ranges
2228 * renoir_set_watermarks_table
2229 * smu_write_watermarks_table
2232 * dc_hardware_init -> amdgpu_dm_init
2233 * dc_set_power_state --> dm_resume
2235 * therefore, this function apply to navi10/12/14 but not Renoir
2238 switch (adev->ip_versions[DCE_HWIP][0]) {
2239 case IP_VERSION(2, 0, 2):
2240 case IP_VERSION(2, 0, 0):
2246 ret = amdgpu_dpm_write_watermarks_table(adev);
2248 DRM_ERROR("Failed to update WMTABLE!\n");
2256 * dm_hw_init() - Initialize DC device
2257 * @handle: The base driver device containing the amdgpu_dm device.
2259 * Initialize the &struct amdgpu_display_manager device. This involves calling
2260 * the initializers of each DM component, then populating the struct with them.
2262 * Although the function implies hardware initialization, both hardware and
2263 * software are initialized here. Splitting them out to their relevant init
2264 * hooks is a future TODO item.
2266 * Some notable things that are initialized here:
2268 * - Display Core, both software and hardware
2269 * - DC modules that we need (freesync and color management)
2270 * - DRM software states
2271 * - Interrupt sources and handlers
2273 * - Debug FS entries, if enabled
2275 static int dm_hw_init(void *handle)
2277 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2278 /* Create DAL display manager */
2279 amdgpu_dm_init(adev);
2280 amdgpu_dm_hpd_init(adev);
2286 * dm_hw_fini() - Teardown DC device
2287 * @handle: The base driver device containing the amdgpu_dm device.
2289 * Teardown components within &struct amdgpu_display_manager that require
2290 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2291 * were loaded. Also flush IRQ workqueues and disable them.
2293 static int dm_hw_fini(void *handle)
2295 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2297 amdgpu_dm_hpd_fini(adev);
2299 amdgpu_dm_irq_fini(adev);
2300 amdgpu_dm_fini(adev);
2305 static int dm_enable_vblank(struct drm_crtc *crtc);
2306 static void dm_disable_vblank(struct drm_crtc *crtc);
2308 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2309 struct dc_state *state, bool enable)
2311 enum dc_irq_source irq_source;
2312 struct amdgpu_crtc *acrtc;
2316 for (i = 0; i < state->stream_count; i++) {
2317 acrtc = get_crtc_by_otg_inst(
2318 adev, state->stream_status[i].primary_otg_inst);
2320 if (acrtc && state->stream_status[i].plane_count != 0) {
2321 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2322 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2323 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2324 acrtc->crtc_id, enable ? "en" : "dis", rc);
2326 DRM_WARN("Failed to %s pflip interrupts\n",
2327 enable ? "enable" : "disable");
2330 rc = dm_enable_vblank(&acrtc->base);
2332 DRM_WARN("Failed to enable vblank interrupts\n");
2334 dm_disable_vblank(&acrtc->base);
2342 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2344 struct dc_state *context = NULL;
2345 enum dc_status res = DC_ERROR_UNEXPECTED;
2347 struct dc_stream_state *del_streams[MAX_PIPES];
2348 int del_streams_count = 0;
2350 memset(del_streams, 0, sizeof(del_streams));
2352 context = dc_create_state(dc);
2353 if (context == NULL)
2354 goto context_alloc_fail;
2356 dc_resource_state_copy_construct_current(dc, context);
2358 /* First remove from context all streams */
2359 for (i = 0; i < context->stream_count; i++) {
2360 struct dc_stream_state *stream = context->streams[i];
2362 del_streams[del_streams_count++] = stream;
2365 /* Remove all planes for removed streams and then remove the streams */
2366 for (i = 0; i < del_streams_count; i++) {
2367 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2368 res = DC_FAIL_DETACH_SURFACES;
2372 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2377 res = dc_commit_state(dc, context);
2380 dc_release_state(context);
2386 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2390 if (dm->hpd_rx_offload_wq) {
2391 for (i = 0; i < dm->dc->caps.max_links; i++)
2392 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2396 static int dm_suspend(void *handle)
2398 struct amdgpu_device *adev = handle;
2399 struct amdgpu_display_manager *dm = &adev->dm;
2402 if (amdgpu_in_reset(adev)) {
2403 mutex_lock(&dm->dc_lock);
2405 #if defined(CONFIG_DRM_AMD_DC_DCN)
2406 dc_allow_idle_optimizations(adev->dm.dc, false);
2409 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2411 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2413 amdgpu_dm_commit_zero_streams(dm->dc);
2415 amdgpu_dm_irq_suspend(adev);
2417 hpd_rx_irq_work_suspend(dm);
2422 WARN_ON(adev->dm.cached_state);
2423 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2425 s3_handle_mst(adev_to_drm(adev), true);
2427 amdgpu_dm_irq_suspend(adev);
2429 hpd_rx_irq_work_suspend(dm);
2431 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2436 struct amdgpu_dm_connector *
2437 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2438 struct drm_crtc *crtc)
2441 struct drm_connector_state *new_con_state;
2442 struct drm_connector *connector;
2443 struct drm_crtc *crtc_from_state;
2445 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2446 crtc_from_state = new_con_state->crtc;
2448 if (crtc_from_state == crtc)
2449 return to_amdgpu_dm_connector(connector);
2455 static void emulated_link_detect(struct dc_link *link)
2457 struct dc_sink_init_data sink_init_data = { 0 };
2458 struct display_sink_capability sink_caps = { 0 };
2459 enum dc_edid_status edid_status;
2460 struct dc_context *dc_ctx = link->ctx;
2461 struct dc_sink *sink = NULL;
2462 struct dc_sink *prev_sink = NULL;
2464 link->type = dc_connection_none;
2465 prev_sink = link->local_sink;
2468 dc_sink_release(prev_sink);
2470 switch (link->connector_signal) {
2471 case SIGNAL_TYPE_HDMI_TYPE_A: {
2472 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2473 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2477 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2478 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2479 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2483 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2484 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2485 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2489 case SIGNAL_TYPE_LVDS: {
2490 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2491 sink_caps.signal = SIGNAL_TYPE_LVDS;
2495 case SIGNAL_TYPE_EDP: {
2496 sink_caps.transaction_type =
2497 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2498 sink_caps.signal = SIGNAL_TYPE_EDP;
2502 case SIGNAL_TYPE_DISPLAY_PORT: {
2503 sink_caps.transaction_type =
2504 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2505 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2510 DC_ERROR("Invalid connector type! signal:%d\n",
2511 link->connector_signal);
2515 sink_init_data.link = link;
2516 sink_init_data.sink_signal = sink_caps.signal;
2518 sink = dc_sink_create(&sink_init_data);
2520 DC_ERROR("Failed to create sink!\n");
2524 /* dc_sink_create returns a new reference */
2525 link->local_sink = sink;
2527 edid_status = dm_helpers_read_local_edid(
2532 if (edid_status != EDID_OK)
2533 DC_ERROR("Failed to read EDID");
2537 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2538 struct amdgpu_display_manager *dm)
2541 struct dc_surface_update surface_updates[MAX_SURFACES];
2542 struct dc_plane_info plane_infos[MAX_SURFACES];
2543 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2544 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2545 struct dc_stream_update stream_update;
2549 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2552 dm_error("Failed to allocate update bundle\n");
2556 for (k = 0; k < dc_state->stream_count; k++) {
2557 bundle->stream_update.stream = dc_state->streams[k];
2559 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2560 bundle->surface_updates[m].surface =
2561 dc_state->stream_status->plane_states[m];
2562 bundle->surface_updates[m].surface->force_full_update =
2565 dc_commit_updates_for_stream(
2566 dm->dc, bundle->surface_updates,
2567 dc_state->stream_status->plane_count,
2568 dc_state->streams[k], &bundle->stream_update, dc_state);
2577 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2579 struct dc_stream_state *stream_state;
2580 struct amdgpu_dm_connector *aconnector = link->priv;
2581 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2582 struct dc_stream_update stream_update;
2583 bool dpms_off = true;
2585 memset(&stream_update, 0, sizeof(stream_update));
2586 stream_update.dpms_off = &dpms_off;
2588 mutex_lock(&adev->dm.dc_lock);
2589 stream_state = dc_stream_find_from_link(link);
2591 if (stream_state == NULL) {
2592 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2593 mutex_unlock(&adev->dm.dc_lock);
2597 stream_update.stream = stream_state;
2598 acrtc_state->force_dpms_off = true;
2599 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2600 stream_state, &stream_update,
2601 stream_state->ctx->dc->current_state);
2602 mutex_unlock(&adev->dm.dc_lock);
2605 static int dm_resume(void *handle)
2607 struct amdgpu_device *adev = handle;
2608 struct drm_device *ddev = adev_to_drm(adev);
2609 struct amdgpu_display_manager *dm = &adev->dm;
2610 struct amdgpu_dm_connector *aconnector;
2611 struct drm_connector *connector;
2612 struct drm_connector_list_iter iter;
2613 struct drm_crtc *crtc;
2614 struct drm_crtc_state *new_crtc_state;
2615 struct dm_crtc_state *dm_new_crtc_state;
2616 struct drm_plane *plane;
2617 struct drm_plane_state *new_plane_state;
2618 struct dm_plane_state *dm_new_plane_state;
2619 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2620 enum dc_connection_type new_connection_type = dc_connection_none;
2621 struct dc_state *dc_state;
2624 if (amdgpu_in_reset(adev)) {
2625 dc_state = dm->cached_dc_state;
2628 * The dc->current_state is backed up into dm->cached_dc_state
2629 * before we commit 0 streams.
2631 * DC will clear link encoder assignments on the real state
2632 * but the changes won't propagate over to the copy we made
2633 * before the 0 streams commit.
2635 * DC expects that link encoder assignments are *not* valid
2636 * when committing a state, so as a workaround we can copy
2637 * off of the current state.
2639 * We lose the previous assignments, but we had already
2640 * commit 0 streams anyway.
2642 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2644 if (dc_enable_dmub_notifications(adev->dm.dc))
2645 amdgpu_dm_outbox_init(adev);
2647 r = dm_dmub_hw_init(adev);
2649 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2651 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2654 amdgpu_dm_irq_resume_early(adev);
2656 for (i = 0; i < dc_state->stream_count; i++) {
2657 dc_state->streams[i]->mode_changed = true;
2658 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2659 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2664 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2666 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2668 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2670 dc_release_state(dm->cached_dc_state);
2671 dm->cached_dc_state = NULL;
2673 amdgpu_dm_irq_resume_late(adev);
2675 mutex_unlock(&dm->dc_lock);
2679 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2680 dc_release_state(dm_state->context);
2681 dm_state->context = dc_create_state(dm->dc);
2682 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2683 dc_resource_state_construct(dm->dc, dm_state->context);
2685 /* Re-enable outbox interrupts for DPIA. */
2686 if (dc_enable_dmub_notifications(adev->dm.dc))
2687 amdgpu_dm_outbox_init(adev);
2689 /* Before powering on DC we need to re-initialize DMUB. */
2690 dm_dmub_hw_resume(adev);
2692 /* power on hardware */
2693 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2695 /* program HPD filter */
2699 * early enable HPD Rx IRQ, should be done before set mode as short
2700 * pulse interrupts are used for MST
2702 amdgpu_dm_irq_resume_early(adev);
2704 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2705 s3_handle_mst(ddev, false);
2708 drm_connector_list_iter_begin(ddev, &iter);
2709 drm_for_each_connector_iter(connector, &iter) {
2710 aconnector = to_amdgpu_dm_connector(connector);
2713 * this is the case when traversing through already created
2714 * MST connectors, should be skipped
2716 if (aconnector->mst_port)
2719 mutex_lock(&aconnector->hpd_lock);
2720 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2721 DRM_ERROR("KMS: Failed to detect connector\n");
2723 if (aconnector->base.force && new_connection_type == dc_connection_none)
2724 emulated_link_detect(aconnector->dc_link);
2726 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2728 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2729 aconnector->fake_enable = false;
2731 if (aconnector->dc_sink)
2732 dc_sink_release(aconnector->dc_sink);
2733 aconnector->dc_sink = NULL;
2734 amdgpu_dm_update_connector_after_detect(aconnector);
2735 mutex_unlock(&aconnector->hpd_lock);
2737 drm_connector_list_iter_end(&iter);
2739 /* Force mode set in atomic commit */
2740 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2741 new_crtc_state->active_changed = true;
2744 * atomic_check is expected to create the dc states. We need to release
2745 * them here, since they were duplicated as part of the suspend
2748 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2749 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2750 if (dm_new_crtc_state->stream) {
2751 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2752 dc_stream_release(dm_new_crtc_state->stream);
2753 dm_new_crtc_state->stream = NULL;
2757 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2758 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2759 if (dm_new_plane_state->dc_state) {
2760 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2761 dc_plane_state_release(dm_new_plane_state->dc_state);
2762 dm_new_plane_state->dc_state = NULL;
2766 drm_atomic_helper_resume(ddev, dm->cached_state);
2768 dm->cached_state = NULL;
2770 amdgpu_dm_irq_resume_late(adev);
2772 amdgpu_dm_smu_write_watermarks_table(adev);
2780 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2781 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2782 * the base driver's device list to be initialized and torn down accordingly.
2784 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2787 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2789 .early_init = dm_early_init,
2790 .late_init = dm_late_init,
2791 .sw_init = dm_sw_init,
2792 .sw_fini = dm_sw_fini,
2793 .early_fini = amdgpu_dm_early_fini,
2794 .hw_init = dm_hw_init,
2795 .hw_fini = dm_hw_fini,
2796 .suspend = dm_suspend,
2797 .resume = dm_resume,
2798 .is_idle = dm_is_idle,
2799 .wait_for_idle = dm_wait_for_idle,
2800 .check_soft_reset = dm_check_soft_reset,
2801 .soft_reset = dm_soft_reset,
2802 .set_clockgating_state = dm_set_clockgating_state,
2803 .set_powergating_state = dm_set_powergating_state,
2806 const struct amdgpu_ip_block_version dm_ip_block =
2808 .type = AMD_IP_BLOCK_TYPE_DCE,
2812 .funcs = &amdgpu_dm_funcs,
2822 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2823 .fb_create = amdgpu_display_user_framebuffer_create,
2824 .get_format_info = amd_get_format_info,
2825 .output_poll_changed = drm_fb_helper_output_poll_changed,
2826 .atomic_check = amdgpu_dm_atomic_check,
2827 .atomic_commit = drm_atomic_helper_commit,
2830 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2831 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2834 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2836 u32 max_cll, min_cll, max, min, q, r;
2837 struct amdgpu_dm_backlight_caps *caps;
2838 struct amdgpu_display_manager *dm;
2839 struct drm_connector *conn_base;
2840 struct amdgpu_device *adev;
2841 struct dc_link *link = NULL;
2842 static const u8 pre_computed_values[] = {
2843 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2844 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2847 if (!aconnector || !aconnector->dc_link)
2850 link = aconnector->dc_link;
2851 if (link->connector_signal != SIGNAL_TYPE_EDP)
2854 conn_base = &aconnector->base;
2855 adev = drm_to_adev(conn_base->dev);
2857 for (i = 0; i < dm->num_of_edps; i++) {
2858 if (link == dm->backlight_link[i])
2861 if (i >= dm->num_of_edps)
2863 caps = &dm->backlight_caps[i];
2864 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2865 caps->aux_support = false;
2866 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2867 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2869 if (caps->ext_caps->bits.oled == 1 /*||
2870 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2871 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2872 caps->aux_support = true;
2874 if (amdgpu_backlight == 0)
2875 caps->aux_support = false;
2876 else if (amdgpu_backlight == 1)
2877 caps->aux_support = true;
2879 /* From the specification (CTA-861-G), for calculating the maximum
2880 * luminance we need to use:
2881 * Luminance = 50*2**(CV/32)
2882 * Where CV is a one-byte value.
2883 * For calculating this expression we may need float point precision;
2884 * to avoid this complexity level, we take advantage that CV is divided
2885 * by a constant. From the Euclids division algorithm, we know that CV
2886 * can be written as: CV = 32*q + r. Next, we replace CV in the
2887 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2888 * need to pre-compute the value of r/32. For pre-computing the values
2889 * We just used the following Ruby line:
2890 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2891 * The results of the above expressions can be verified at
2892 * pre_computed_values.
2896 max = (1 << q) * pre_computed_values[r];
2898 // min luminance: maxLum * (CV/255)^2 / 100
2899 q = DIV_ROUND_CLOSEST(min_cll, 255);
2900 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2902 caps->aux_max_input_signal = max;
2903 caps->aux_min_input_signal = min;
2906 void amdgpu_dm_update_connector_after_detect(
2907 struct amdgpu_dm_connector *aconnector)
2909 struct drm_connector *connector = &aconnector->base;
2910 struct drm_device *dev = connector->dev;
2911 struct dc_sink *sink;
2913 /* MST handled by drm_mst framework */
2914 if (aconnector->mst_mgr.mst_state == true)
2917 sink = aconnector->dc_link->local_sink;
2919 dc_sink_retain(sink);
2922 * Edid mgmt connector gets first update only in mode_valid hook and then
2923 * the connector sink is set to either fake or physical sink depends on link status.
2924 * Skip if already done during boot.
2926 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2927 && aconnector->dc_em_sink) {
2930 * For S3 resume with headless use eml_sink to fake stream
2931 * because on resume connector->sink is set to NULL
2933 mutex_lock(&dev->mode_config.mutex);
2936 if (aconnector->dc_sink) {
2937 amdgpu_dm_update_freesync_caps(connector, NULL);
2939 * retain and release below are used to
2940 * bump up refcount for sink because the link doesn't point
2941 * to it anymore after disconnect, so on next crtc to connector
2942 * reshuffle by UMD we will get into unwanted dc_sink release
2944 dc_sink_release(aconnector->dc_sink);
2946 aconnector->dc_sink = sink;
2947 dc_sink_retain(aconnector->dc_sink);
2948 amdgpu_dm_update_freesync_caps(connector,
2951 amdgpu_dm_update_freesync_caps(connector, NULL);
2952 if (!aconnector->dc_sink) {
2953 aconnector->dc_sink = aconnector->dc_em_sink;
2954 dc_sink_retain(aconnector->dc_sink);
2958 mutex_unlock(&dev->mode_config.mutex);
2961 dc_sink_release(sink);
2966 * TODO: temporary guard to look for proper fix
2967 * if this sink is MST sink, we should not do anything
2969 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2970 dc_sink_release(sink);
2974 if (aconnector->dc_sink == sink) {
2976 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2979 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2980 aconnector->connector_id);
2982 dc_sink_release(sink);
2986 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2987 aconnector->connector_id, aconnector->dc_sink, sink);
2989 mutex_lock(&dev->mode_config.mutex);
2992 * 1. Update status of the drm connector
2993 * 2. Send an event and let userspace tell us what to do
2997 * TODO: check if we still need the S3 mode update workaround.
2998 * If yes, put it here.
3000 if (aconnector->dc_sink) {
3001 amdgpu_dm_update_freesync_caps(connector, NULL);
3002 dc_sink_release(aconnector->dc_sink);
3005 aconnector->dc_sink = sink;
3006 dc_sink_retain(aconnector->dc_sink);
3007 if (sink->dc_edid.length == 0) {
3008 aconnector->edid = NULL;
3009 if (aconnector->dc_link->aux_mode) {
3010 drm_dp_cec_unset_edid(
3011 &aconnector->dm_dp_aux.aux);
3015 (struct edid *)sink->dc_edid.raw_edid;
3017 if (aconnector->dc_link->aux_mode)
3018 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3022 drm_connector_update_edid_property(connector, aconnector->edid);
3023 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3024 update_connector_ext_caps(aconnector);
3026 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3027 amdgpu_dm_update_freesync_caps(connector, NULL);
3028 drm_connector_update_edid_property(connector, NULL);
3029 aconnector->num_modes = 0;
3030 dc_sink_release(aconnector->dc_sink);
3031 aconnector->dc_sink = NULL;
3032 aconnector->edid = NULL;
3033 #ifdef CONFIG_DRM_AMD_DC_HDCP
3034 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3035 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3036 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3040 mutex_unlock(&dev->mode_config.mutex);
3042 update_subconnector_property(aconnector);
3045 dc_sink_release(sink);
3048 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3050 struct drm_connector *connector = &aconnector->base;
3051 struct drm_device *dev = connector->dev;
3052 enum dc_connection_type new_connection_type = dc_connection_none;
3053 struct amdgpu_device *adev = drm_to_adev(dev);
3054 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3055 struct dm_crtc_state *dm_crtc_state = NULL;
3057 if (adev->dm.disable_hpd_irq)
3060 if (dm_con_state->base.state && dm_con_state->base.crtc)
3061 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3062 dm_con_state->base.state,
3063 dm_con_state->base.crtc));
3065 * In case of failure or MST no need to update connector status or notify the OS
3066 * since (for MST case) MST does this in its own context.
3068 mutex_lock(&aconnector->hpd_lock);
3070 #ifdef CONFIG_DRM_AMD_DC_HDCP
3071 if (adev->dm.hdcp_workqueue) {
3072 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3073 dm_con_state->update_hdcp = true;
3076 if (aconnector->fake_enable)
3077 aconnector->fake_enable = false;
3079 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3080 DRM_ERROR("KMS: Failed to detect connector\n");
3082 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3083 emulated_link_detect(aconnector->dc_link);
3085 drm_modeset_lock_all(dev);
3086 dm_restore_drm_connector_state(dev, connector);
3087 drm_modeset_unlock_all(dev);
3089 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3090 drm_kms_helper_connector_hotplug_event(connector);
3092 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3093 if (new_connection_type == dc_connection_none &&
3094 aconnector->dc_link->type == dc_connection_none &&
3096 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3098 amdgpu_dm_update_connector_after_detect(aconnector);
3100 drm_modeset_lock_all(dev);
3101 dm_restore_drm_connector_state(dev, connector);
3102 drm_modeset_unlock_all(dev);
3104 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3105 drm_kms_helper_connector_hotplug_event(connector);
3107 mutex_unlock(&aconnector->hpd_lock);
3111 static void handle_hpd_irq(void *param)
3113 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3115 handle_hpd_irq_helper(aconnector);
3119 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3121 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3123 bool new_irq_handled = false;
3125 int dpcd_bytes_to_read;
3127 const int max_process_count = 30;
3128 int process_count = 0;
3130 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3132 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3133 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3134 /* DPCD 0x200 - 0x201 for downstream IRQ */
3135 dpcd_addr = DP_SINK_COUNT;
3137 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3138 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3139 dpcd_addr = DP_SINK_COUNT_ESI;
3142 dret = drm_dp_dpcd_read(
3143 &aconnector->dm_dp_aux.aux,
3146 dpcd_bytes_to_read);
3148 while (dret == dpcd_bytes_to_read &&
3149 process_count < max_process_count) {
3155 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3156 /* handle HPD short pulse irq */
3157 if (aconnector->mst_mgr.mst_state)
3159 &aconnector->mst_mgr,
3163 if (new_irq_handled) {
3164 /* ACK at DPCD to notify down stream */
3165 const int ack_dpcd_bytes_to_write =
3166 dpcd_bytes_to_read - 1;
3168 for (retry = 0; retry < 3; retry++) {
3171 wret = drm_dp_dpcd_write(
3172 &aconnector->dm_dp_aux.aux,
3175 ack_dpcd_bytes_to_write);
3176 if (wret == ack_dpcd_bytes_to_write)
3180 /* check if there is new irq to be handled */
3181 dret = drm_dp_dpcd_read(
3182 &aconnector->dm_dp_aux.aux,
3185 dpcd_bytes_to_read);
3187 new_irq_handled = false;
3193 if (process_count == max_process_count)
3194 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3197 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3198 union hpd_irq_data hpd_irq_data)
3200 struct hpd_rx_irq_offload_work *offload_work =
3201 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3203 if (!offload_work) {
3204 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3208 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3209 offload_work->data = hpd_irq_data;
3210 offload_work->offload_wq = offload_wq;
3212 queue_work(offload_wq->wq, &offload_work->work);
3213 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3216 static void handle_hpd_rx_irq(void *param)
3218 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3219 struct drm_connector *connector = &aconnector->base;
3220 struct drm_device *dev = connector->dev;
3221 struct dc_link *dc_link = aconnector->dc_link;
3222 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3223 bool result = false;
3224 enum dc_connection_type new_connection_type = dc_connection_none;
3225 struct amdgpu_device *adev = drm_to_adev(dev);
3226 union hpd_irq_data hpd_irq_data;
3227 bool link_loss = false;
3228 bool has_left_work = false;
3229 int idx = aconnector->base.index;
3230 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3232 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3234 if (adev->dm.disable_hpd_irq)
3238 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3239 * conflict, after implement i2c helper, this mutex should be
3242 mutex_lock(&aconnector->hpd_lock);
3244 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3245 &link_loss, true, &has_left_work);
3250 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3251 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3255 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3256 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3257 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3258 dm_handle_mst_sideband_msg(aconnector);
3265 spin_lock(&offload_wq->offload_lock);
3266 skip = offload_wq->is_handling_link_loss;
3269 offload_wq->is_handling_link_loss = true;
3271 spin_unlock(&offload_wq->offload_lock);
3274 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3281 if (result && !is_mst_root_connector) {
3282 /* Downstream Port status changed. */
3283 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3284 DRM_ERROR("KMS: Failed to detect connector\n");
3286 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3287 emulated_link_detect(dc_link);
3289 if (aconnector->fake_enable)
3290 aconnector->fake_enable = false;
3292 amdgpu_dm_update_connector_after_detect(aconnector);
3295 drm_modeset_lock_all(dev);
3296 dm_restore_drm_connector_state(dev, connector);
3297 drm_modeset_unlock_all(dev);
3299 drm_kms_helper_connector_hotplug_event(connector);
3300 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3302 if (aconnector->fake_enable)
3303 aconnector->fake_enable = false;
3305 amdgpu_dm_update_connector_after_detect(aconnector);
3308 drm_modeset_lock_all(dev);
3309 dm_restore_drm_connector_state(dev, connector);
3310 drm_modeset_unlock_all(dev);
3312 drm_kms_helper_connector_hotplug_event(connector);
3315 #ifdef CONFIG_DRM_AMD_DC_HDCP
3316 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3317 if (adev->dm.hdcp_workqueue)
3318 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3322 if (dc_link->type != dc_connection_mst_branch)
3323 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3325 mutex_unlock(&aconnector->hpd_lock);
3328 static void register_hpd_handlers(struct amdgpu_device *adev)
3330 struct drm_device *dev = adev_to_drm(adev);
3331 struct drm_connector *connector;
3332 struct amdgpu_dm_connector *aconnector;
3333 const struct dc_link *dc_link;
3334 struct dc_interrupt_params int_params = {0};
3336 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3337 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3339 list_for_each_entry(connector,
3340 &dev->mode_config.connector_list, head) {
3342 aconnector = to_amdgpu_dm_connector(connector);
3343 dc_link = aconnector->dc_link;
3345 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3346 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3347 int_params.irq_source = dc_link->irq_source_hpd;
3349 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3351 (void *) aconnector);
3354 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3356 /* Also register for DP short pulse (hpd_rx). */
3357 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3358 int_params.irq_source = dc_link->irq_source_hpd_rx;
3360 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3362 (void *) aconnector);
3364 if (adev->dm.hpd_rx_offload_wq)
3365 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3371 #if defined(CONFIG_DRM_AMD_DC_SI)
3372 /* Register IRQ sources and initialize IRQ callbacks */
3373 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3375 struct dc *dc = adev->dm.dc;
3376 struct common_irq_params *c_irq_params;
3377 struct dc_interrupt_params int_params = {0};
3380 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3382 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3383 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3386 * Actions of amdgpu_irq_add_id():
3387 * 1. Register a set() function with base driver.
3388 * Base driver will call set() function to enable/disable an
3389 * interrupt in DC hardware.
3390 * 2. Register amdgpu_dm_irq_handler().
3391 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3392 * coming from DC hardware.
3393 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3394 * for acknowledging and handling. */
3396 /* Use VBLANK interrupt */
3397 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3398 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3400 DRM_ERROR("Failed to add crtc irq id!\n");
3404 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3405 int_params.irq_source =
3406 dc_interrupt_to_irq_source(dc, i+1 , 0);
3408 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3410 c_irq_params->adev = adev;
3411 c_irq_params->irq_src = int_params.irq_source;
3413 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3414 dm_crtc_high_irq, c_irq_params);
3417 /* Use GRPH_PFLIP interrupt */
3418 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3419 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3420 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3422 DRM_ERROR("Failed to add page flip irq id!\n");
3426 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3427 int_params.irq_source =
3428 dc_interrupt_to_irq_source(dc, i, 0);
3430 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3432 c_irq_params->adev = adev;
3433 c_irq_params->irq_src = int_params.irq_source;
3435 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3436 dm_pflip_high_irq, c_irq_params);
3441 r = amdgpu_irq_add_id(adev, client_id,
3442 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3444 DRM_ERROR("Failed to add hpd irq id!\n");
3448 register_hpd_handlers(adev);
3454 /* Register IRQ sources and initialize IRQ callbacks */
3455 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3457 struct dc *dc = adev->dm.dc;
3458 struct common_irq_params *c_irq_params;
3459 struct dc_interrupt_params int_params = {0};
3462 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3464 if (adev->family >= AMDGPU_FAMILY_AI)
3465 client_id = SOC15_IH_CLIENTID_DCE;
3467 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3468 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3471 * Actions of amdgpu_irq_add_id():
3472 * 1. Register a set() function with base driver.
3473 * Base driver will call set() function to enable/disable an
3474 * interrupt in DC hardware.
3475 * 2. Register amdgpu_dm_irq_handler().
3476 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3477 * coming from DC hardware.
3478 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3479 * for acknowledging and handling. */
3481 /* Use VBLANK interrupt */
3482 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3483 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3485 DRM_ERROR("Failed to add crtc irq id!\n");
3489 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3490 int_params.irq_source =
3491 dc_interrupt_to_irq_source(dc, i, 0);
3493 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3495 c_irq_params->adev = adev;
3496 c_irq_params->irq_src = int_params.irq_source;
3498 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3499 dm_crtc_high_irq, c_irq_params);
3502 /* Use VUPDATE interrupt */
3503 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3504 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3506 DRM_ERROR("Failed to add vupdate irq id!\n");
3510 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3511 int_params.irq_source =
3512 dc_interrupt_to_irq_source(dc, i, 0);
3514 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3516 c_irq_params->adev = adev;
3517 c_irq_params->irq_src = int_params.irq_source;
3519 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3520 dm_vupdate_high_irq, c_irq_params);
3523 /* Use GRPH_PFLIP interrupt */
3524 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3525 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3526 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3528 DRM_ERROR("Failed to add page flip irq id!\n");
3532 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3533 int_params.irq_source =
3534 dc_interrupt_to_irq_source(dc, i, 0);
3536 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3538 c_irq_params->adev = adev;
3539 c_irq_params->irq_src = int_params.irq_source;
3541 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3542 dm_pflip_high_irq, c_irq_params);
3547 r = amdgpu_irq_add_id(adev, client_id,
3548 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3550 DRM_ERROR("Failed to add hpd irq id!\n");
3554 register_hpd_handlers(adev);
3559 #if defined(CONFIG_DRM_AMD_DC_DCN)
3560 /* Register IRQ sources and initialize IRQ callbacks */
3561 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3563 struct dc *dc = adev->dm.dc;
3564 struct common_irq_params *c_irq_params;
3565 struct dc_interrupt_params int_params = {0};
3568 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3569 static const unsigned int vrtl_int_srcid[] = {
3570 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3571 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3572 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3573 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3574 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3575 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3579 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3580 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3583 * Actions of amdgpu_irq_add_id():
3584 * 1. Register a set() function with base driver.
3585 * Base driver will call set() function to enable/disable an
3586 * interrupt in DC hardware.
3587 * 2. Register amdgpu_dm_irq_handler().
3588 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3589 * coming from DC hardware.
3590 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3591 * for acknowledging and handling.
3594 /* Use VSTARTUP interrupt */
3595 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3596 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3598 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3601 DRM_ERROR("Failed to add crtc irq id!\n");
3605 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3606 int_params.irq_source =
3607 dc_interrupt_to_irq_source(dc, i, 0);
3609 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3611 c_irq_params->adev = adev;
3612 c_irq_params->irq_src = int_params.irq_source;
3614 amdgpu_dm_irq_register_interrupt(
3615 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3618 /* Use otg vertical line interrupt */
3619 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3620 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3621 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3622 vrtl_int_srcid[i], &adev->vline0_irq);
3625 DRM_ERROR("Failed to add vline0 irq id!\n");
3629 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3630 int_params.irq_source =
3631 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3633 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3634 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3638 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3639 - DC_IRQ_SOURCE_DC1_VLINE0];
3641 c_irq_params->adev = adev;
3642 c_irq_params->irq_src = int_params.irq_source;
3644 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3645 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3649 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3650 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3651 * to trigger at end of each vblank, regardless of state of the lock,
3652 * matching DCE behaviour.
3654 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3655 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3657 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3660 DRM_ERROR("Failed to add vupdate irq id!\n");
3664 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3665 int_params.irq_source =
3666 dc_interrupt_to_irq_source(dc, i, 0);
3668 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3670 c_irq_params->adev = adev;
3671 c_irq_params->irq_src = int_params.irq_source;
3673 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3674 dm_vupdate_high_irq, c_irq_params);
3677 /* Use GRPH_PFLIP interrupt */
3678 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3679 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3681 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3683 DRM_ERROR("Failed to add page flip irq id!\n");
3687 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3688 int_params.irq_source =
3689 dc_interrupt_to_irq_source(dc, i, 0);
3691 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3693 c_irq_params->adev = adev;
3694 c_irq_params->irq_src = int_params.irq_source;
3696 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3697 dm_pflip_high_irq, c_irq_params);
3702 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3705 DRM_ERROR("Failed to add hpd irq id!\n");
3709 register_hpd_handlers(adev);
3713 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3714 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3716 struct dc *dc = adev->dm.dc;
3717 struct common_irq_params *c_irq_params;
3718 struct dc_interrupt_params int_params = {0};
3721 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3722 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3724 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3725 &adev->dmub_outbox_irq);
3727 DRM_ERROR("Failed to add outbox irq id!\n");
3731 if (dc->ctx->dmub_srv) {
3732 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3733 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3734 int_params.irq_source =
3735 dc_interrupt_to_irq_source(dc, i, 0);
3737 c_irq_params = &adev->dm.dmub_outbox_params[0];
3739 c_irq_params->adev = adev;
3740 c_irq_params->irq_src = int_params.irq_source;
3742 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3743 dm_dmub_outbox1_low_irq, c_irq_params);
3751 * Acquires the lock for the atomic state object and returns
3752 * the new atomic state.
3754 * This should only be called during atomic check.
3756 int dm_atomic_get_state(struct drm_atomic_state *state,
3757 struct dm_atomic_state **dm_state)
3759 struct drm_device *dev = state->dev;
3760 struct amdgpu_device *adev = drm_to_adev(dev);
3761 struct amdgpu_display_manager *dm = &adev->dm;
3762 struct drm_private_state *priv_state;
3767 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3768 if (IS_ERR(priv_state))
3769 return PTR_ERR(priv_state);
3771 *dm_state = to_dm_atomic_state(priv_state);
3776 static struct dm_atomic_state *
3777 dm_atomic_get_new_state(struct drm_atomic_state *state)
3779 struct drm_device *dev = state->dev;
3780 struct amdgpu_device *adev = drm_to_adev(dev);
3781 struct amdgpu_display_manager *dm = &adev->dm;
3782 struct drm_private_obj *obj;
3783 struct drm_private_state *new_obj_state;
3786 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3787 if (obj->funcs == dm->atomic_obj.funcs)
3788 return to_dm_atomic_state(new_obj_state);
3794 static struct drm_private_state *
3795 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3797 struct dm_atomic_state *old_state, *new_state;
3799 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3803 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3805 old_state = to_dm_atomic_state(obj->state);
3807 if (old_state && old_state->context)
3808 new_state->context = dc_copy_state(old_state->context);
3810 if (!new_state->context) {
3815 return &new_state->base;
3818 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3819 struct drm_private_state *state)
3821 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3823 if (dm_state && dm_state->context)
3824 dc_release_state(dm_state->context);
3829 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3830 .atomic_duplicate_state = dm_atomic_duplicate_state,
3831 .atomic_destroy_state = dm_atomic_destroy_state,
3834 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3836 struct dm_atomic_state *state;
3839 adev->mode_info.mode_config_initialized = true;
3841 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3842 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3844 adev_to_drm(adev)->mode_config.max_width = 16384;
3845 adev_to_drm(adev)->mode_config.max_height = 16384;
3847 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3848 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3849 /* indicates support for immediate flip */
3850 adev_to_drm(adev)->mode_config.async_page_flip = true;
3852 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3854 state = kzalloc(sizeof(*state), GFP_KERNEL);
3858 state->context = dc_create_state(adev->dm.dc);
3859 if (!state->context) {
3864 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3866 drm_atomic_private_obj_init(adev_to_drm(adev),
3867 &adev->dm.atomic_obj,
3869 &dm_atomic_state_funcs);
3871 r = amdgpu_display_modeset_create_props(adev);
3873 dc_release_state(state->context);
3878 r = amdgpu_dm_audio_init(adev);
3880 dc_release_state(state->context);
3888 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3889 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3890 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3892 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3893 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3895 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3898 #if defined(CONFIG_ACPI)
3899 struct amdgpu_dm_backlight_caps caps;
3901 memset(&caps, 0, sizeof(caps));
3903 if (dm->backlight_caps[bl_idx].caps_valid)
3906 amdgpu_acpi_get_backlight_caps(&caps);
3907 if (caps.caps_valid) {
3908 dm->backlight_caps[bl_idx].caps_valid = true;
3909 if (caps.aux_support)
3911 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3912 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3914 dm->backlight_caps[bl_idx].min_input_signal =
3915 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3916 dm->backlight_caps[bl_idx].max_input_signal =
3917 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3920 if (dm->backlight_caps[bl_idx].aux_support)
3923 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3924 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3928 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3929 unsigned *min, unsigned *max)
3934 if (caps->aux_support) {
3935 // Firmware limits are in nits, DC API wants millinits.
3936 *max = 1000 * caps->aux_max_input_signal;
3937 *min = 1000 * caps->aux_min_input_signal;
3939 // Firmware limits are 8-bit, PWM control is 16-bit.
3940 *max = 0x101 * caps->max_input_signal;
3941 *min = 0x101 * caps->min_input_signal;
3946 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3947 uint32_t brightness)
3951 if (!get_brightness_range(caps, &min, &max))
3954 // Rescale 0..255 to min..max
3955 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3956 AMDGPU_MAX_BL_LEVEL);
3959 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3960 uint32_t brightness)
3964 if (!get_brightness_range(caps, &min, &max))
3967 if (brightness < min)
3969 // Rescale min..max to 0..255
3970 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3974 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3976 u32 user_brightness)
3978 struct amdgpu_dm_backlight_caps caps;
3979 struct dc_link *link;
3983 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3984 caps = dm->backlight_caps[bl_idx];
3986 dm->brightness[bl_idx] = user_brightness;
3987 /* update scratch register */
3989 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3990 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3991 link = (struct dc_link *)dm->backlight_link[bl_idx];
3993 /* Change brightness based on AUX property */
3994 if (caps.aux_support) {
3995 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3996 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3998 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4000 rc = dc_link_set_backlight_level(link, brightness, 0);
4002 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4008 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4010 struct amdgpu_display_manager *dm = bl_get_data(bd);
4013 for (i = 0; i < dm->num_of_edps; i++) {
4014 if (bd == dm->backlight_dev[i])
4017 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4019 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4024 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4027 struct amdgpu_dm_backlight_caps caps;
4028 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4030 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4031 caps = dm->backlight_caps[bl_idx];
4033 if (caps.aux_support) {
4037 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4039 return dm->brightness[bl_idx];
4040 return convert_brightness_to_user(&caps, avg);
4042 int ret = dc_link_get_backlight_level(link);
4044 if (ret == DC_ERROR_UNEXPECTED)
4045 return dm->brightness[bl_idx];
4046 return convert_brightness_to_user(&caps, ret);
4050 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4052 struct amdgpu_display_manager *dm = bl_get_data(bd);
4055 for (i = 0; i < dm->num_of_edps; i++) {
4056 if (bd == dm->backlight_dev[i])
4059 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4061 return amdgpu_dm_backlight_get_level(dm, i);
4064 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4065 .options = BL_CORE_SUSPENDRESUME,
4066 .get_brightness = amdgpu_dm_backlight_get_brightness,
4067 .update_status = amdgpu_dm_backlight_update_status,
4071 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4074 struct backlight_properties props = { 0 };
4076 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4077 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4079 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4080 props.brightness = AMDGPU_MAX_BL_LEVEL;
4081 props.type = BACKLIGHT_RAW;
4083 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4084 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4086 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4087 adev_to_drm(dm->adev)->dev,
4089 &amdgpu_dm_backlight_ops,
4092 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4093 DRM_ERROR("DM: Backlight registration failed!\n");
4095 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4099 static int initialize_plane(struct amdgpu_display_manager *dm,
4100 struct amdgpu_mode_info *mode_info, int plane_id,
4101 enum drm_plane_type plane_type,
4102 const struct dc_plane_cap *plane_cap)
4104 struct drm_plane *plane;
4105 unsigned long possible_crtcs;
4108 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4110 DRM_ERROR("KMS: Failed to allocate plane\n");
4113 plane->type = plane_type;
4116 * HACK: IGT tests expect that the primary plane for a CRTC
4117 * can only have one possible CRTC. Only expose support for
4118 * any CRTC if they're not going to be used as a primary plane
4119 * for a CRTC - like overlay or underlay planes.
4121 possible_crtcs = 1 << plane_id;
4122 if (plane_id >= dm->dc->caps.max_streams)
4123 possible_crtcs = 0xff;
4125 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4128 DRM_ERROR("KMS: Failed to initialize plane\n");
4134 mode_info->planes[plane_id] = plane;
4140 static void register_backlight_device(struct amdgpu_display_manager *dm,
4141 struct dc_link *link)
4143 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4144 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4146 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4147 link->type != dc_connection_none) {
4149 * Event if registration failed, we should continue with
4150 * DM initialization because not having a backlight control
4151 * is better then a black screen.
4153 if (!dm->backlight_dev[dm->num_of_edps])
4154 amdgpu_dm_register_backlight_device(dm);
4156 if (dm->backlight_dev[dm->num_of_edps]) {
4157 dm->backlight_link[dm->num_of_edps] = link;
4166 * In this architecture, the association
4167 * connector -> encoder -> crtc
4168 * id not really requried. The crtc and connector will hold the
4169 * display_index as an abstraction to use with DAL component
4171 * Returns 0 on success
4173 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4175 struct amdgpu_display_manager *dm = &adev->dm;
4177 struct amdgpu_dm_connector *aconnector = NULL;
4178 struct amdgpu_encoder *aencoder = NULL;
4179 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4181 int32_t primary_planes;
4182 enum dc_connection_type new_connection_type = dc_connection_none;
4183 const struct dc_plane_cap *plane;
4184 bool psr_feature_enabled = false;
4186 dm->display_indexes_num = dm->dc->caps.max_streams;
4187 /* Update the actual used number of crtc */
4188 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4190 link_cnt = dm->dc->caps.max_links;
4191 if (amdgpu_dm_mode_config_init(dm->adev)) {
4192 DRM_ERROR("DM: Failed to initialize mode config\n");
4196 /* There is one primary plane per CRTC */
4197 primary_planes = dm->dc->caps.max_streams;
4198 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4201 * Initialize primary planes, implicit planes for legacy IOCTLS.
4202 * Order is reversed to match iteration order in atomic check.
4204 for (i = (primary_planes - 1); i >= 0; i--) {
4205 plane = &dm->dc->caps.planes[i];
4207 if (initialize_plane(dm, mode_info, i,
4208 DRM_PLANE_TYPE_PRIMARY, plane)) {
4209 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4215 * Initialize overlay planes, index starting after primary planes.
4216 * These planes have a higher DRM index than the primary planes since
4217 * they should be considered as having a higher z-order.
4218 * Order is reversed to match iteration order in atomic check.
4220 * Only support DCN for now, and only expose one so we don't encourage
4221 * userspace to use up all the pipes.
4223 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4224 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4226 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4229 if (!plane->blends_with_above || !plane->blends_with_below)
4232 if (!plane->pixel_format_support.argb8888)
4235 if (initialize_plane(dm, NULL, primary_planes + i,
4236 DRM_PLANE_TYPE_OVERLAY, plane)) {
4237 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4241 /* Only create one overlay plane. */
4245 for (i = 0; i < dm->dc->caps.max_streams; i++)
4246 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4247 DRM_ERROR("KMS: Failed to initialize crtc\n");
4251 #if defined(CONFIG_DRM_AMD_DC_DCN)
4252 /* Use Outbox interrupt */
4253 switch (adev->ip_versions[DCE_HWIP][0]) {
4254 case IP_VERSION(3, 0, 0):
4255 case IP_VERSION(3, 1, 2):
4256 case IP_VERSION(3, 1, 3):
4257 case IP_VERSION(3, 1, 5):
4258 case IP_VERSION(3, 1, 6):
4259 case IP_VERSION(2, 1, 0):
4260 if (register_outbox_irq_handlers(dm->adev)) {
4261 DRM_ERROR("DM: Failed to initialize IRQ\n");
4266 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4267 adev->ip_versions[DCE_HWIP][0]);
4270 /* Determine whether to enable PSR support by default. */
4271 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4272 switch (adev->ip_versions[DCE_HWIP][0]) {
4273 case IP_VERSION(3, 1, 2):
4274 case IP_VERSION(3, 1, 3):
4275 case IP_VERSION(3, 1, 5):
4276 case IP_VERSION(3, 1, 6):
4277 psr_feature_enabled = true;
4280 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4286 /* Disable vblank IRQs aggressively for power-saving. */
4287 adev_to_drm(adev)->vblank_disable_immediate = true;
4289 /* loops over all connectors on the board */
4290 for (i = 0; i < link_cnt; i++) {
4291 struct dc_link *link = NULL;
4293 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4295 "KMS: Cannot support more than %d display indexes\n",
4296 AMDGPU_DM_MAX_DISPLAY_INDEX);
4300 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4304 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4308 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4309 DRM_ERROR("KMS: Failed to initialize encoder\n");
4313 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4314 DRM_ERROR("KMS: Failed to initialize connector\n");
4318 link = dc_get_link_at_index(dm->dc, i);
4320 if (!dc_link_detect_sink(link, &new_connection_type))
4321 DRM_ERROR("KMS: Failed to detect connector\n");
4323 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4324 emulated_link_detect(link);
4325 amdgpu_dm_update_connector_after_detect(aconnector);
4327 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4328 amdgpu_dm_update_connector_after_detect(aconnector);
4329 register_backlight_device(dm, link);
4330 if (dm->num_of_edps)
4331 update_connector_ext_caps(aconnector);
4332 if (psr_feature_enabled)
4333 amdgpu_dm_set_psr_caps(link);
4335 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4336 * PSR is also supported.
4338 if (link->psr_settings.psr_feature_enabled)
4339 adev_to_drm(adev)->vblank_disable_immediate = false;
4345 /* Software is initialized. Now we can register interrupt handlers. */
4346 switch (adev->asic_type) {
4347 #if defined(CONFIG_DRM_AMD_DC_SI)
4352 if (dce60_register_irq_handlers(dm->adev)) {
4353 DRM_ERROR("DM: Failed to initialize IRQ\n");
4367 case CHIP_POLARIS11:
4368 case CHIP_POLARIS10:
4369 case CHIP_POLARIS12:
4374 if (dce110_register_irq_handlers(dm->adev)) {
4375 DRM_ERROR("DM: Failed to initialize IRQ\n");
4380 #if defined(CONFIG_DRM_AMD_DC_DCN)
4381 switch (adev->ip_versions[DCE_HWIP][0]) {
4382 case IP_VERSION(1, 0, 0):
4383 case IP_VERSION(1, 0, 1):
4384 case IP_VERSION(2, 0, 2):
4385 case IP_VERSION(2, 0, 3):
4386 case IP_VERSION(2, 0, 0):
4387 case IP_VERSION(2, 1, 0):
4388 case IP_VERSION(3, 0, 0):
4389 case IP_VERSION(3, 0, 2):
4390 case IP_VERSION(3, 0, 3):
4391 case IP_VERSION(3, 0, 1):
4392 case IP_VERSION(3, 1, 2):
4393 case IP_VERSION(3, 1, 3):
4394 case IP_VERSION(3, 1, 5):
4395 case IP_VERSION(3, 1, 6):
4396 if (dcn10_register_irq_handlers(dm->adev)) {
4397 DRM_ERROR("DM: Failed to initialize IRQ\n");
4402 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4403 adev->ip_versions[DCE_HWIP][0]);
4418 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4420 drm_atomic_private_obj_fini(&dm->atomic_obj);
4424 /******************************************************************************
4425 * amdgpu_display_funcs functions
4426 *****************************************************************************/
4429 * dm_bandwidth_update - program display watermarks
4431 * @adev: amdgpu_device pointer
4433 * Calculate and program the display watermarks and line buffer allocation.
4435 static void dm_bandwidth_update(struct amdgpu_device *adev)
4437 /* TODO: implement later */
4440 static const struct amdgpu_display_funcs dm_display_funcs = {
4441 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4442 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4443 .backlight_set_level = NULL, /* never called for DC */
4444 .backlight_get_level = NULL, /* never called for DC */
4445 .hpd_sense = NULL,/* called unconditionally */
4446 .hpd_set_polarity = NULL, /* called unconditionally */
4447 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4448 .page_flip_get_scanoutpos =
4449 dm_crtc_get_scanoutpos,/* called unconditionally */
4450 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4451 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4454 #if defined(CONFIG_DEBUG_KERNEL_DC)
4456 static ssize_t s3_debug_store(struct device *device,
4457 struct device_attribute *attr,
4463 struct drm_device *drm_dev = dev_get_drvdata(device);
4464 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4466 ret = kstrtoint(buf, 0, &s3_state);
4471 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4476 return ret == 0 ? count : 0;
4479 DEVICE_ATTR_WO(s3_debug);
4483 static int dm_early_init(void *handle)
4485 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4487 switch (adev->asic_type) {
4488 #if defined(CONFIG_DRM_AMD_DC_SI)
4492 adev->mode_info.num_crtc = 6;
4493 adev->mode_info.num_hpd = 6;
4494 adev->mode_info.num_dig = 6;
4497 adev->mode_info.num_crtc = 2;
4498 adev->mode_info.num_hpd = 2;
4499 adev->mode_info.num_dig = 2;
4504 adev->mode_info.num_crtc = 6;
4505 adev->mode_info.num_hpd = 6;
4506 adev->mode_info.num_dig = 6;
4509 adev->mode_info.num_crtc = 4;
4510 adev->mode_info.num_hpd = 6;
4511 adev->mode_info.num_dig = 7;
4515 adev->mode_info.num_crtc = 2;
4516 adev->mode_info.num_hpd = 6;
4517 adev->mode_info.num_dig = 6;
4521 adev->mode_info.num_crtc = 6;
4522 adev->mode_info.num_hpd = 6;
4523 adev->mode_info.num_dig = 7;
4526 adev->mode_info.num_crtc = 3;
4527 adev->mode_info.num_hpd = 6;
4528 adev->mode_info.num_dig = 9;
4531 adev->mode_info.num_crtc = 2;
4532 adev->mode_info.num_hpd = 6;
4533 adev->mode_info.num_dig = 9;
4535 case CHIP_POLARIS11:
4536 case CHIP_POLARIS12:
4537 adev->mode_info.num_crtc = 5;
4538 adev->mode_info.num_hpd = 5;
4539 adev->mode_info.num_dig = 5;
4541 case CHIP_POLARIS10:
4543 adev->mode_info.num_crtc = 6;
4544 adev->mode_info.num_hpd = 6;
4545 adev->mode_info.num_dig = 6;
4550 adev->mode_info.num_crtc = 6;
4551 adev->mode_info.num_hpd = 6;
4552 adev->mode_info.num_dig = 6;
4555 #if defined(CONFIG_DRM_AMD_DC_DCN)
4556 switch (adev->ip_versions[DCE_HWIP][0]) {
4557 case IP_VERSION(2, 0, 2):
4558 case IP_VERSION(3, 0, 0):
4559 adev->mode_info.num_crtc = 6;
4560 adev->mode_info.num_hpd = 6;
4561 adev->mode_info.num_dig = 6;
4563 case IP_VERSION(2, 0, 0):
4564 case IP_VERSION(3, 0, 2):
4565 adev->mode_info.num_crtc = 5;
4566 adev->mode_info.num_hpd = 5;
4567 adev->mode_info.num_dig = 5;
4569 case IP_VERSION(2, 0, 3):
4570 case IP_VERSION(3, 0, 3):
4571 adev->mode_info.num_crtc = 2;
4572 adev->mode_info.num_hpd = 2;
4573 adev->mode_info.num_dig = 2;
4575 case IP_VERSION(1, 0, 0):
4576 case IP_VERSION(1, 0, 1):
4577 case IP_VERSION(3, 0, 1):
4578 case IP_VERSION(2, 1, 0):
4579 case IP_VERSION(3, 1, 2):
4580 case IP_VERSION(3, 1, 3):
4581 case IP_VERSION(3, 1, 5):
4582 case IP_VERSION(3, 1, 6):
4583 adev->mode_info.num_crtc = 4;
4584 adev->mode_info.num_hpd = 4;
4585 adev->mode_info.num_dig = 4;
4588 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4589 adev->ip_versions[DCE_HWIP][0]);
4596 amdgpu_dm_set_irq_funcs(adev);
4598 if (adev->mode_info.funcs == NULL)
4599 adev->mode_info.funcs = &dm_display_funcs;
4602 * Note: Do NOT change adev->audio_endpt_rreg and
4603 * adev->audio_endpt_wreg because they are initialised in
4604 * amdgpu_device_init()
4606 #if defined(CONFIG_DEBUG_KERNEL_DC)
4608 adev_to_drm(adev)->dev,
4609 &dev_attr_s3_debug);
4615 static bool modeset_required(struct drm_crtc_state *crtc_state,
4616 struct dc_stream_state *new_stream,
4617 struct dc_stream_state *old_stream)
4619 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4622 static bool modereset_required(struct drm_crtc_state *crtc_state)
4624 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4627 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4629 drm_encoder_cleanup(encoder);
4633 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4634 .destroy = amdgpu_dm_encoder_destroy,
4638 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4639 struct drm_framebuffer *fb,
4640 int *min_downscale, int *max_upscale)
4642 struct amdgpu_device *adev = drm_to_adev(dev);
4643 struct dc *dc = adev->dm.dc;
4644 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4645 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4647 switch (fb->format->format) {
4648 case DRM_FORMAT_P010:
4649 case DRM_FORMAT_NV12:
4650 case DRM_FORMAT_NV21:
4651 *max_upscale = plane_cap->max_upscale_factor.nv12;
4652 *min_downscale = plane_cap->max_downscale_factor.nv12;
4655 case DRM_FORMAT_XRGB16161616F:
4656 case DRM_FORMAT_ARGB16161616F:
4657 case DRM_FORMAT_XBGR16161616F:
4658 case DRM_FORMAT_ABGR16161616F:
4659 *max_upscale = plane_cap->max_upscale_factor.fp16;
4660 *min_downscale = plane_cap->max_downscale_factor.fp16;
4664 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4665 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4670 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4671 * scaling factor of 1.0 == 1000 units.
4673 if (*max_upscale == 1)
4674 *max_upscale = 1000;
4676 if (*min_downscale == 1)
4677 *min_downscale = 1000;
4681 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4682 const struct drm_plane_state *state,
4683 struct dc_scaling_info *scaling_info)
4685 int scale_w, scale_h, min_downscale, max_upscale;
4687 memset(scaling_info, 0, sizeof(*scaling_info));
4689 /* Source is fixed 16.16 but we ignore mantissa for now... */
4690 scaling_info->src_rect.x = state->src_x >> 16;
4691 scaling_info->src_rect.y = state->src_y >> 16;
4694 * For reasons we don't (yet) fully understand a non-zero
4695 * src_y coordinate into an NV12 buffer can cause a
4696 * system hang on DCN1x.
4697 * To avoid hangs (and maybe be overly cautious)
4698 * let's reject both non-zero src_x and src_y.
4700 * We currently know of only one use-case to reproduce a
4701 * scenario with non-zero src_x and src_y for NV12, which
4702 * is to gesture the YouTube Android app into full screen
4705 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4706 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4707 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4708 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4711 scaling_info->src_rect.width = state->src_w >> 16;
4712 if (scaling_info->src_rect.width == 0)
4715 scaling_info->src_rect.height = state->src_h >> 16;
4716 if (scaling_info->src_rect.height == 0)
4719 scaling_info->dst_rect.x = state->crtc_x;
4720 scaling_info->dst_rect.y = state->crtc_y;
4722 if (state->crtc_w == 0)
4725 scaling_info->dst_rect.width = state->crtc_w;
4727 if (state->crtc_h == 0)
4730 scaling_info->dst_rect.height = state->crtc_h;
4732 /* DRM doesn't specify clipping on destination output. */
4733 scaling_info->clip_rect = scaling_info->dst_rect;
4735 /* Validate scaling per-format with DC plane caps */
4736 if (state->plane && state->plane->dev && state->fb) {
4737 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4738 &min_downscale, &max_upscale);
4740 min_downscale = 250;
4741 max_upscale = 16000;
4744 scale_w = scaling_info->dst_rect.width * 1000 /
4745 scaling_info->src_rect.width;
4747 if (scale_w < min_downscale || scale_w > max_upscale)
4750 scale_h = scaling_info->dst_rect.height * 1000 /
4751 scaling_info->src_rect.height;
4753 if (scale_h < min_downscale || scale_h > max_upscale)
4757 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4758 * assume reasonable defaults based on the format.
4765 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4766 uint64_t tiling_flags)
4768 /* Fill GFX8 params */
4769 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4770 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4772 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4773 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4774 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4775 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4776 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4778 /* XXX fix me for VI */
4779 tiling_info->gfx8.num_banks = num_banks;
4780 tiling_info->gfx8.array_mode =
4781 DC_ARRAY_2D_TILED_THIN1;
4782 tiling_info->gfx8.tile_split = tile_split;
4783 tiling_info->gfx8.bank_width = bankw;
4784 tiling_info->gfx8.bank_height = bankh;
4785 tiling_info->gfx8.tile_aspect = mtaspect;
4786 tiling_info->gfx8.tile_mode =
4787 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4788 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4789 == DC_ARRAY_1D_TILED_THIN1) {
4790 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4793 tiling_info->gfx8.pipe_config =
4794 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4798 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4799 union dc_tiling_info *tiling_info)
4801 tiling_info->gfx9.num_pipes =
4802 adev->gfx.config.gb_addr_config_fields.num_pipes;
4803 tiling_info->gfx9.num_banks =
4804 adev->gfx.config.gb_addr_config_fields.num_banks;
4805 tiling_info->gfx9.pipe_interleave =
4806 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4807 tiling_info->gfx9.num_shader_engines =
4808 adev->gfx.config.gb_addr_config_fields.num_se;
4809 tiling_info->gfx9.max_compressed_frags =
4810 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4811 tiling_info->gfx9.num_rb_per_se =
4812 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4813 tiling_info->gfx9.shaderEnable = 1;
4814 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4815 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4819 validate_dcc(struct amdgpu_device *adev,
4820 const enum surface_pixel_format format,
4821 const enum dc_rotation_angle rotation,
4822 const union dc_tiling_info *tiling_info,
4823 const struct dc_plane_dcc_param *dcc,
4824 const struct dc_plane_address *address,
4825 const struct plane_size *plane_size)
4827 struct dc *dc = adev->dm.dc;
4828 struct dc_dcc_surface_param input;
4829 struct dc_surface_dcc_cap output;
4831 memset(&input, 0, sizeof(input));
4832 memset(&output, 0, sizeof(output));
4837 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4838 !dc->cap_funcs.get_dcc_compression_cap)
4841 input.format = format;
4842 input.surface_size.width = plane_size->surface_size.width;
4843 input.surface_size.height = plane_size->surface_size.height;
4844 input.swizzle_mode = tiling_info->gfx9.swizzle;
4846 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4847 input.scan = SCAN_DIRECTION_HORIZONTAL;
4848 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4849 input.scan = SCAN_DIRECTION_VERTICAL;
4851 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4854 if (!output.capable)
4857 if (dcc->independent_64b_blks == 0 &&
4858 output.grph.rgb.independent_64b_blks != 0)
4865 modifier_has_dcc(uint64_t modifier)
4867 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4871 modifier_gfx9_swizzle_mode(uint64_t modifier)
4873 if (modifier == DRM_FORMAT_MOD_LINEAR)
4876 return AMD_FMT_MOD_GET(TILE, modifier);
4879 static const struct drm_format_info *
4880 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4882 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4886 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4887 union dc_tiling_info *tiling_info,
4890 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4891 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4892 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4893 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4895 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4897 if (!IS_AMD_FMT_MOD(modifier))
4900 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4901 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4903 if (adev->family >= AMDGPU_FAMILY_NV) {
4904 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4906 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4908 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4912 enum dm_micro_swizzle {
4913 MICRO_SWIZZLE_Z = 0,
4914 MICRO_SWIZZLE_S = 1,
4915 MICRO_SWIZZLE_D = 2,
4919 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4923 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4924 const struct drm_format_info *info = drm_format_info(format);
4927 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4933 * We always have to allow these modifiers:
4934 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4935 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4937 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4938 modifier == DRM_FORMAT_MOD_INVALID) {
4942 /* Check that the modifier is on the list of the plane's supported modifiers. */
4943 for (i = 0; i < plane->modifier_count; i++) {
4944 if (modifier == plane->modifiers[i])
4947 if (i == plane->modifier_count)
4951 * For D swizzle the canonical modifier depends on the bpp, so check
4954 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4955 adev->family >= AMDGPU_FAMILY_NV) {
4956 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4960 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4964 if (modifier_has_dcc(modifier)) {
4965 /* Per radeonsi comments 16/64 bpp are more complicated. */
4966 if (info->cpp[0] != 4)
4968 /* We support multi-planar formats, but not when combined with
4969 * additional DCC metadata planes. */
4970 if (info->num_planes > 1)
4978 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4983 if (*cap - *size < 1) {
4984 uint64_t new_cap = *cap * 2;
4985 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4993 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4999 (*mods)[*size] = mod;
5004 add_gfx9_modifiers(const struct amdgpu_device *adev,
5005 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5007 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5008 int pipe_xor_bits = min(8, pipes +
5009 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5010 int bank_xor_bits = min(8 - pipe_xor_bits,
5011 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5012 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5013 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5016 if (adev->family == AMDGPU_FAMILY_RV) {
5017 /* Raven2 and later */
5018 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5021 * No _D DCC swizzles yet because we only allow 32bpp, which
5022 * doesn't support _D on DCN
5025 if (has_constant_encode) {
5026 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5027 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5028 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5029 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5030 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5031 AMD_FMT_MOD_SET(DCC, 1) |
5032 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5033 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5034 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5037 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5038 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5039 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5040 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5041 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5042 AMD_FMT_MOD_SET(DCC, 1) |
5043 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5044 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5045 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5047 if (has_constant_encode) {
5048 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5049 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5050 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5051 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5052 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5053 AMD_FMT_MOD_SET(DCC, 1) |
5054 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5055 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5056 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5058 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5059 AMD_FMT_MOD_SET(RB, rb) |
5060 AMD_FMT_MOD_SET(PIPE, pipes));
5063 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5065 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5066 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5067 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5068 AMD_FMT_MOD_SET(DCC, 1) |
5069 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5070 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5071 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5072 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5073 AMD_FMT_MOD_SET(RB, rb) |
5074 AMD_FMT_MOD_SET(PIPE, pipes));
5078 * Only supported for 64bpp on Raven, will be filtered on format in
5079 * dm_plane_format_mod_supported.
5081 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5082 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5083 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5084 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5085 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5087 if (adev->family == AMDGPU_FAMILY_RV) {
5088 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5089 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5090 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5091 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5092 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5096 * Only supported for 64bpp on Raven, will be filtered on format in
5097 * dm_plane_format_mod_supported.
5099 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5100 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5101 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5103 if (adev->family == AMDGPU_FAMILY_RV) {
5104 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5105 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5106 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5111 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5112 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5114 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5116 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5117 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5118 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5119 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5120 AMD_FMT_MOD_SET(DCC, 1) |
5121 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5122 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5123 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5125 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5126 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5127 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5128 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5129 AMD_FMT_MOD_SET(DCC, 1) |
5130 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5131 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5132 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5133 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5135 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5136 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5137 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5138 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5140 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5141 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5142 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5143 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5146 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5147 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5148 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5149 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5151 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5152 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5153 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5157 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5158 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5160 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5161 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5163 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5164 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5165 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5166 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5167 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5168 AMD_FMT_MOD_SET(DCC, 1) |
5169 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5170 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5171 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5172 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5174 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5175 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5176 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5177 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5178 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5179 AMD_FMT_MOD_SET(DCC, 1) |
5180 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5181 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5182 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5184 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5185 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5186 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5187 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5188 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5189 AMD_FMT_MOD_SET(DCC, 1) |
5190 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5191 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5192 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5193 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5194 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5196 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5197 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5198 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5199 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5200 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5201 AMD_FMT_MOD_SET(DCC, 1) |
5202 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5203 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5204 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5205 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5207 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5208 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5209 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5210 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5211 AMD_FMT_MOD_SET(PACKERS, pkrs));
5213 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5214 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5215 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5216 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5217 AMD_FMT_MOD_SET(PACKERS, pkrs));
5219 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5220 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5221 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5222 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5224 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5225 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5226 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5230 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5232 uint64_t size = 0, capacity = 128;
5235 /* We have not hooked up any pre-GFX9 modifiers. */
5236 if (adev->family < AMDGPU_FAMILY_AI)
5239 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5241 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5242 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5243 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5244 return *mods ? 0 : -ENOMEM;
5247 switch (adev->family) {
5248 case AMDGPU_FAMILY_AI:
5249 case AMDGPU_FAMILY_RV:
5250 add_gfx9_modifiers(adev, mods, &size, &capacity);
5252 case AMDGPU_FAMILY_NV:
5253 case AMDGPU_FAMILY_VGH:
5254 case AMDGPU_FAMILY_YC:
5255 case AMDGPU_FAMILY_GC_10_3_6:
5256 case AMDGPU_FAMILY_GC_10_3_7:
5257 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5258 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5260 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5264 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5266 /* INVALID marks the end of the list. */
5267 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5276 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5277 const struct amdgpu_framebuffer *afb,
5278 const enum surface_pixel_format format,
5279 const enum dc_rotation_angle rotation,
5280 const struct plane_size *plane_size,
5281 union dc_tiling_info *tiling_info,
5282 struct dc_plane_dcc_param *dcc,
5283 struct dc_plane_address *address,
5284 const bool force_disable_dcc)
5286 const uint64_t modifier = afb->base.modifier;
5289 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5290 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5292 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5293 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5294 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5295 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5298 dcc->meta_pitch = afb->base.pitches[1];
5299 dcc->independent_64b_blks = independent_64b_blks;
5300 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5301 if (independent_64b_blks && independent_128b_blks)
5302 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5303 else if (independent_128b_blks)
5304 dcc->dcc_ind_blk = hubp_ind_block_128b;
5305 else if (independent_64b_blks && !independent_128b_blks)
5306 dcc->dcc_ind_blk = hubp_ind_block_64b;
5308 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5310 if (independent_64b_blks)
5311 dcc->dcc_ind_blk = hubp_ind_block_64b;
5313 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5316 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5317 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5320 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5322 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5328 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5329 const struct amdgpu_framebuffer *afb,
5330 const enum surface_pixel_format format,
5331 const enum dc_rotation_angle rotation,
5332 const uint64_t tiling_flags,
5333 union dc_tiling_info *tiling_info,
5334 struct plane_size *plane_size,
5335 struct dc_plane_dcc_param *dcc,
5336 struct dc_plane_address *address,
5338 bool force_disable_dcc)
5340 const struct drm_framebuffer *fb = &afb->base;
5343 memset(tiling_info, 0, sizeof(*tiling_info));
5344 memset(plane_size, 0, sizeof(*plane_size));
5345 memset(dcc, 0, sizeof(*dcc));
5346 memset(address, 0, sizeof(*address));
5348 address->tmz_surface = tmz_surface;
5350 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5351 uint64_t addr = afb->address + fb->offsets[0];
5353 plane_size->surface_size.x = 0;
5354 plane_size->surface_size.y = 0;
5355 plane_size->surface_size.width = fb->width;
5356 plane_size->surface_size.height = fb->height;
5357 plane_size->surface_pitch =
5358 fb->pitches[0] / fb->format->cpp[0];
5360 address->type = PLN_ADDR_TYPE_GRAPHICS;
5361 address->grph.addr.low_part = lower_32_bits(addr);
5362 address->grph.addr.high_part = upper_32_bits(addr);
5363 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5364 uint64_t luma_addr = afb->address + fb->offsets[0];
5365 uint64_t chroma_addr = afb->address + fb->offsets[1];
5367 plane_size->surface_size.x = 0;
5368 plane_size->surface_size.y = 0;
5369 plane_size->surface_size.width = fb->width;
5370 plane_size->surface_size.height = fb->height;
5371 plane_size->surface_pitch =
5372 fb->pitches[0] / fb->format->cpp[0];
5374 plane_size->chroma_size.x = 0;
5375 plane_size->chroma_size.y = 0;
5376 /* TODO: set these based on surface format */
5377 plane_size->chroma_size.width = fb->width / 2;
5378 plane_size->chroma_size.height = fb->height / 2;
5380 plane_size->chroma_pitch =
5381 fb->pitches[1] / fb->format->cpp[1];
5383 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5384 address->video_progressive.luma_addr.low_part =
5385 lower_32_bits(luma_addr);
5386 address->video_progressive.luma_addr.high_part =
5387 upper_32_bits(luma_addr);
5388 address->video_progressive.chroma_addr.low_part =
5389 lower_32_bits(chroma_addr);
5390 address->video_progressive.chroma_addr.high_part =
5391 upper_32_bits(chroma_addr);
5394 if (adev->family >= AMDGPU_FAMILY_AI) {
5395 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5396 rotation, plane_size,
5403 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5410 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5411 bool *per_pixel_alpha, bool *global_alpha,
5412 int *global_alpha_value)
5414 *per_pixel_alpha = false;
5415 *global_alpha = false;
5416 *global_alpha_value = 0xff;
5418 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5421 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5422 static const uint32_t alpha_formats[] = {
5423 DRM_FORMAT_ARGB8888,
5424 DRM_FORMAT_RGBA8888,
5425 DRM_FORMAT_ABGR8888,
5427 uint32_t format = plane_state->fb->format->format;
5430 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5431 if (format == alpha_formats[i]) {
5432 *per_pixel_alpha = true;
5438 if (plane_state->alpha < 0xffff) {
5439 *global_alpha = true;
5440 *global_alpha_value = plane_state->alpha >> 8;
5445 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5446 const enum surface_pixel_format format,
5447 enum dc_color_space *color_space)
5451 *color_space = COLOR_SPACE_SRGB;
5453 /* DRM color properties only affect non-RGB formats. */
5454 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5457 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5459 switch (plane_state->color_encoding) {
5460 case DRM_COLOR_YCBCR_BT601:
5462 *color_space = COLOR_SPACE_YCBCR601;
5464 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5467 case DRM_COLOR_YCBCR_BT709:
5469 *color_space = COLOR_SPACE_YCBCR709;
5471 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5474 case DRM_COLOR_YCBCR_BT2020:
5476 *color_space = COLOR_SPACE_2020_YCBCR;
5489 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5490 const struct drm_plane_state *plane_state,
5491 const uint64_t tiling_flags,
5492 struct dc_plane_info *plane_info,
5493 struct dc_plane_address *address,
5495 bool force_disable_dcc)
5497 const struct drm_framebuffer *fb = plane_state->fb;
5498 const struct amdgpu_framebuffer *afb =
5499 to_amdgpu_framebuffer(plane_state->fb);
5502 memset(plane_info, 0, sizeof(*plane_info));
5504 switch (fb->format->format) {
5506 plane_info->format =
5507 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5509 case DRM_FORMAT_RGB565:
5510 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5512 case DRM_FORMAT_XRGB8888:
5513 case DRM_FORMAT_ARGB8888:
5514 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5516 case DRM_FORMAT_XRGB2101010:
5517 case DRM_FORMAT_ARGB2101010:
5518 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5520 case DRM_FORMAT_XBGR2101010:
5521 case DRM_FORMAT_ABGR2101010:
5522 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5524 case DRM_FORMAT_XBGR8888:
5525 case DRM_FORMAT_ABGR8888:
5526 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5528 case DRM_FORMAT_NV21:
5529 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5531 case DRM_FORMAT_NV12:
5532 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5534 case DRM_FORMAT_P010:
5535 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5537 case DRM_FORMAT_XRGB16161616F:
5538 case DRM_FORMAT_ARGB16161616F:
5539 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5541 case DRM_FORMAT_XBGR16161616F:
5542 case DRM_FORMAT_ABGR16161616F:
5543 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5545 case DRM_FORMAT_XRGB16161616:
5546 case DRM_FORMAT_ARGB16161616:
5547 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5549 case DRM_FORMAT_XBGR16161616:
5550 case DRM_FORMAT_ABGR16161616:
5551 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5555 "Unsupported screen format %p4cc\n",
5556 &fb->format->format);
5560 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5561 case DRM_MODE_ROTATE_0:
5562 plane_info->rotation = ROTATION_ANGLE_0;
5564 case DRM_MODE_ROTATE_90:
5565 plane_info->rotation = ROTATION_ANGLE_90;
5567 case DRM_MODE_ROTATE_180:
5568 plane_info->rotation = ROTATION_ANGLE_180;
5570 case DRM_MODE_ROTATE_270:
5571 plane_info->rotation = ROTATION_ANGLE_270;
5574 plane_info->rotation = ROTATION_ANGLE_0;
5578 plane_info->visible = true;
5579 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5581 plane_info->layer_index = 0;
5583 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5584 &plane_info->color_space);
5588 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5589 plane_info->rotation, tiling_flags,
5590 &plane_info->tiling_info,
5591 &plane_info->plane_size,
5592 &plane_info->dcc, address, tmz_surface,
5597 fill_blending_from_plane_state(
5598 plane_state, &plane_info->per_pixel_alpha,
5599 &plane_info->global_alpha, &plane_info->global_alpha_value);
5604 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5605 struct dc_plane_state *dc_plane_state,
5606 struct drm_plane_state *plane_state,
5607 struct drm_crtc_state *crtc_state)
5609 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5610 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5611 struct dc_scaling_info scaling_info;
5612 struct dc_plane_info plane_info;
5614 bool force_disable_dcc = false;
5616 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5620 dc_plane_state->src_rect = scaling_info.src_rect;
5621 dc_plane_state->dst_rect = scaling_info.dst_rect;
5622 dc_plane_state->clip_rect = scaling_info.clip_rect;
5623 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5625 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5626 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5629 &dc_plane_state->address,
5635 dc_plane_state->format = plane_info.format;
5636 dc_plane_state->color_space = plane_info.color_space;
5637 dc_plane_state->format = plane_info.format;
5638 dc_plane_state->plane_size = plane_info.plane_size;
5639 dc_plane_state->rotation = plane_info.rotation;
5640 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5641 dc_plane_state->stereo_format = plane_info.stereo_format;
5642 dc_plane_state->tiling_info = plane_info.tiling_info;
5643 dc_plane_state->visible = plane_info.visible;
5644 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5645 dc_plane_state->global_alpha = plane_info.global_alpha;
5646 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5647 dc_plane_state->dcc = plane_info.dcc;
5648 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5649 dc_plane_state->flip_int_enabled = true;
5652 * Always set input transfer function, since plane state is refreshed
5655 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5662 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5663 const struct dm_connector_state *dm_state,
5664 struct dc_stream_state *stream)
5666 enum amdgpu_rmx_type rmx_type;
5668 struct rect src = { 0 }; /* viewport in composition space*/
5669 struct rect dst = { 0 }; /* stream addressable area */
5671 /* no mode. nothing to be done */
5675 /* Full screen scaling by default */
5676 src.width = mode->hdisplay;
5677 src.height = mode->vdisplay;
5678 dst.width = stream->timing.h_addressable;
5679 dst.height = stream->timing.v_addressable;
5682 rmx_type = dm_state->scaling;
5683 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5684 if (src.width * dst.height <
5685 src.height * dst.width) {
5686 /* height needs less upscaling/more downscaling */
5687 dst.width = src.width *
5688 dst.height / src.height;
5690 /* width needs less upscaling/more downscaling */
5691 dst.height = src.height *
5692 dst.width / src.width;
5694 } else if (rmx_type == RMX_CENTER) {
5698 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5699 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5701 if (dm_state->underscan_enable) {
5702 dst.x += dm_state->underscan_hborder / 2;
5703 dst.y += dm_state->underscan_vborder / 2;
5704 dst.width -= dm_state->underscan_hborder;
5705 dst.height -= dm_state->underscan_vborder;
5712 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5713 dst.x, dst.y, dst.width, dst.height);
5717 static enum dc_color_depth
5718 convert_color_depth_from_display_info(const struct drm_connector *connector,
5719 bool is_y420, int requested_bpc)
5726 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5727 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5729 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5731 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5734 bpc = (uint8_t)connector->display_info.bpc;
5735 /* Assume 8 bpc by default if no bpc is specified. */
5736 bpc = bpc ? bpc : 8;
5739 if (requested_bpc > 0) {
5741 * Cap display bpc based on the user requested value.
5743 * The value for state->max_bpc may not correctly updated
5744 * depending on when the connector gets added to the state
5745 * or if this was called outside of atomic check, so it
5746 * can't be used directly.
5748 bpc = min_t(u8, bpc, requested_bpc);
5750 /* Round down to the nearest even number. */
5751 bpc = bpc - (bpc & 1);
5757 * Temporary Work around, DRM doesn't parse color depth for
5758 * EDID revision before 1.4
5759 * TODO: Fix edid parsing
5761 return COLOR_DEPTH_888;
5763 return COLOR_DEPTH_666;
5765 return COLOR_DEPTH_888;
5767 return COLOR_DEPTH_101010;
5769 return COLOR_DEPTH_121212;
5771 return COLOR_DEPTH_141414;
5773 return COLOR_DEPTH_161616;
5775 return COLOR_DEPTH_UNDEFINED;
5779 static enum dc_aspect_ratio
5780 get_aspect_ratio(const struct drm_display_mode *mode_in)
5782 /* 1-1 mapping, since both enums follow the HDMI spec. */
5783 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5786 static enum dc_color_space
5787 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5789 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5791 switch (dc_crtc_timing->pixel_encoding) {
5792 case PIXEL_ENCODING_YCBCR422:
5793 case PIXEL_ENCODING_YCBCR444:
5794 case PIXEL_ENCODING_YCBCR420:
5797 * 27030khz is the separation point between HDTV and SDTV
5798 * according to HDMI spec, we use YCbCr709 and YCbCr601
5801 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5802 if (dc_crtc_timing->flags.Y_ONLY)
5804 COLOR_SPACE_YCBCR709_LIMITED;
5806 color_space = COLOR_SPACE_YCBCR709;
5808 if (dc_crtc_timing->flags.Y_ONLY)
5810 COLOR_SPACE_YCBCR601_LIMITED;
5812 color_space = COLOR_SPACE_YCBCR601;
5817 case PIXEL_ENCODING_RGB:
5818 color_space = COLOR_SPACE_SRGB;
5829 static bool adjust_colour_depth_from_display_info(
5830 struct dc_crtc_timing *timing_out,
5831 const struct drm_display_info *info)
5833 enum dc_color_depth depth = timing_out->display_color_depth;
5836 normalized_clk = timing_out->pix_clk_100hz / 10;
5837 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5838 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5839 normalized_clk /= 2;
5840 /* Adjusting pix clock following on HDMI spec based on colour depth */
5842 case COLOR_DEPTH_888:
5844 case COLOR_DEPTH_101010:
5845 normalized_clk = (normalized_clk * 30) / 24;
5847 case COLOR_DEPTH_121212:
5848 normalized_clk = (normalized_clk * 36) / 24;
5850 case COLOR_DEPTH_161616:
5851 normalized_clk = (normalized_clk * 48) / 24;
5854 /* The above depths are the only ones valid for HDMI. */
5857 if (normalized_clk <= info->max_tmds_clock) {
5858 timing_out->display_color_depth = depth;
5861 } while (--depth > COLOR_DEPTH_666);
5865 static void fill_stream_properties_from_drm_display_mode(
5866 struct dc_stream_state *stream,
5867 const struct drm_display_mode *mode_in,
5868 const struct drm_connector *connector,
5869 const struct drm_connector_state *connector_state,
5870 const struct dc_stream_state *old_stream,
5873 struct dc_crtc_timing *timing_out = &stream->timing;
5874 const struct drm_display_info *info = &connector->display_info;
5875 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5876 struct hdmi_vendor_infoframe hv_frame;
5877 struct hdmi_avi_infoframe avi_frame;
5879 memset(&hv_frame, 0, sizeof(hv_frame));
5880 memset(&avi_frame, 0, sizeof(avi_frame));
5882 timing_out->h_border_left = 0;
5883 timing_out->h_border_right = 0;
5884 timing_out->v_border_top = 0;
5885 timing_out->v_border_bottom = 0;
5886 /* TODO: un-hardcode */
5887 if (drm_mode_is_420_only(info, mode_in)
5888 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5889 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5890 else if (drm_mode_is_420_also(info, mode_in)
5891 && aconnector->force_yuv420_output)
5892 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5893 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5894 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5895 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5897 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5899 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5900 timing_out->display_color_depth = convert_color_depth_from_display_info(
5902 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5904 timing_out->scan_type = SCANNING_TYPE_NODATA;
5905 timing_out->hdmi_vic = 0;
5908 timing_out->vic = old_stream->timing.vic;
5909 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5910 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5912 timing_out->vic = drm_match_cea_mode(mode_in);
5913 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5914 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5915 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5916 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5919 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5920 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5921 timing_out->vic = avi_frame.video_code;
5922 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5923 timing_out->hdmi_vic = hv_frame.vic;
5926 if (is_freesync_video_mode(mode_in, aconnector)) {
5927 timing_out->h_addressable = mode_in->hdisplay;
5928 timing_out->h_total = mode_in->htotal;
5929 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5930 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5931 timing_out->v_total = mode_in->vtotal;
5932 timing_out->v_addressable = mode_in->vdisplay;
5933 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5934 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5935 timing_out->pix_clk_100hz = mode_in->clock * 10;
5937 timing_out->h_addressable = mode_in->crtc_hdisplay;
5938 timing_out->h_total = mode_in->crtc_htotal;
5939 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5940 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5941 timing_out->v_total = mode_in->crtc_vtotal;
5942 timing_out->v_addressable = mode_in->crtc_vdisplay;
5943 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5944 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5945 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5948 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5950 stream->output_color_space = get_output_color_space(timing_out);
5952 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5953 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5954 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5955 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5956 drm_mode_is_420_also(info, mode_in) &&
5957 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5958 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5959 adjust_colour_depth_from_display_info(timing_out, info);
5964 static void fill_audio_info(struct audio_info *audio_info,
5965 const struct drm_connector *drm_connector,
5966 const struct dc_sink *dc_sink)
5969 int cea_revision = 0;
5970 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5972 audio_info->manufacture_id = edid_caps->manufacturer_id;
5973 audio_info->product_id = edid_caps->product_id;
5975 cea_revision = drm_connector->display_info.cea_rev;
5977 strscpy(audio_info->display_name,
5978 edid_caps->display_name,
5979 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5981 if (cea_revision >= 3) {
5982 audio_info->mode_count = edid_caps->audio_mode_count;
5984 for (i = 0; i < audio_info->mode_count; ++i) {
5985 audio_info->modes[i].format_code =
5986 (enum audio_format_code)
5987 (edid_caps->audio_modes[i].format_code);
5988 audio_info->modes[i].channel_count =
5989 edid_caps->audio_modes[i].channel_count;
5990 audio_info->modes[i].sample_rates.all =
5991 edid_caps->audio_modes[i].sample_rate;
5992 audio_info->modes[i].sample_size =
5993 edid_caps->audio_modes[i].sample_size;
5997 audio_info->flags.all = edid_caps->speaker_flags;
5999 /* TODO: We only check for the progressive mode, check for interlace mode too */
6000 if (drm_connector->latency_present[0]) {
6001 audio_info->video_latency = drm_connector->video_latency[0];
6002 audio_info->audio_latency = drm_connector->audio_latency[0];
6005 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6010 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6011 struct drm_display_mode *dst_mode)
6013 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6014 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6015 dst_mode->crtc_clock = src_mode->crtc_clock;
6016 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6017 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6018 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
6019 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6020 dst_mode->crtc_htotal = src_mode->crtc_htotal;
6021 dst_mode->crtc_hskew = src_mode->crtc_hskew;
6022 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6023 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6024 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6025 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6026 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6030 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6031 const struct drm_display_mode *native_mode,
6034 if (scale_enabled) {
6035 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6036 } else if (native_mode->clock == drm_mode->clock &&
6037 native_mode->htotal == drm_mode->htotal &&
6038 native_mode->vtotal == drm_mode->vtotal) {
6039 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6041 /* no scaling nor amdgpu inserted, no need to patch */
6045 static struct dc_sink *
6046 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6048 struct dc_sink_init_data sink_init_data = { 0 };
6049 struct dc_sink *sink = NULL;
6050 sink_init_data.link = aconnector->dc_link;
6051 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6053 sink = dc_sink_create(&sink_init_data);
6055 DRM_ERROR("Failed to create sink!\n");
6058 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6063 static void set_multisync_trigger_params(
6064 struct dc_stream_state *stream)
6066 struct dc_stream_state *master = NULL;
6068 if (stream->triggered_crtc_reset.enabled) {
6069 master = stream->triggered_crtc_reset.event_source;
6070 stream->triggered_crtc_reset.event =
6071 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6072 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6073 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6077 static void set_master_stream(struct dc_stream_state *stream_set[],
6080 int j, highest_rfr = 0, master_stream = 0;
6082 for (j = 0; j < stream_count; j++) {
6083 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6084 int refresh_rate = 0;
6086 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6087 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6088 if (refresh_rate > highest_rfr) {
6089 highest_rfr = refresh_rate;
6094 for (j = 0; j < stream_count; j++) {
6096 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6100 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6103 struct dc_stream_state *stream;
6105 if (context->stream_count < 2)
6107 for (i = 0; i < context->stream_count ; i++) {
6108 if (!context->streams[i])
6111 * TODO: add a function to read AMD VSDB bits and set
6112 * crtc_sync_master.multi_sync_enabled flag
6113 * For now it's set to false
6117 set_master_stream(context->streams, context->stream_count);
6119 for (i = 0; i < context->stream_count ; i++) {
6120 stream = context->streams[i];
6125 set_multisync_trigger_params(stream);
6129 #if defined(CONFIG_DRM_AMD_DC_DCN)
6130 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6131 struct dc_sink *sink, struct dc_stream_state *stream,
6132 struct dsc_dec_dpcd_caps *dsc_caps)
6134 stream->timing.flags.DSC = 0;
6135 dsc_caps->is_dsc_supported = false;
6137 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6138 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6139 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6140 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6141 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6142 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6143 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6148 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6149 struct dc_sink *sink, struct dc_stream_state *stream,
6150 struct dsc_dec_dpcd_caps *dsc_caps,
6151 uint32_t max_dsc_target_bpp_limit_override)
6153 const struct dc_link_settings *verified_link_cap = NULL;
6154 uint32_t link_bw_in_kbps;
6155 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6156 struct dc *dc = sink->ctx->dc;
6157 struct dc_dsc_bw_range bw_range = {0};
6158 struct dc_dsc_config dsc_cfg = {0};
6160 verified_link_cap = dc_link_get_link_cap(stream->link);
6161 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6162 edp_min_bpp_x16 = 8 * 16;
6163 edp_max_bpp_x16 = 8 * 16;
6165 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6166 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6168 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6169 edp_min_bpp_x16 = edp_max_bpp_x16;
6171 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6172 dc->debug.dsc_min_slice_height_override,
6173 edp_min_bpp_x16, edp_max_bpp_x16,
6178 if (bw_range.max_kbps < link_bw_in_kbps) {
6179 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6181 dc->debug.dsc_min_slice_height_override,
6182 max_dsc_target_bpp_limit_override,
6186 stream->timing.dsc_cfg = dsc_cfg;
6187 stream->timing.flags.DSC = 1;
6188 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6194 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6196 dc->debug.dsc_min_slice_height_override,
6197 max_dsc_target_bpp_limit_override,
6201 stream->timing.dsc_cfg = dsc_cfg;
6202 stream->timing.flags.DSC = 1;
6206 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6207 struct dc_sink *sink, struct dc_stream_state *stream,
6208 struct dsc_dec_dpcd_caps *dsc_caps)
6210 struct drm_connector *drm_connector = &aconnector->base;
6211 uint32_t link_bandwidth_kbps;
6212 uint32_t max_dsc_target_bpp_limit_override = 0;
6213 struct dc *dc = sink->ctx->dc;
6214 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6215 uint32_t dsc_max_supported_bw_in_kbps;
6217 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6218 dc_link_get_link_cap(aconnector->dc_link));
6220 if (stream->link && stream->link->local_sink)
6221 max_dsc_target_bpp_limit_override =
6222 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6224 /* Set DSC policy according to dsc_clock_en */
6225 dc_dsc_policy_set_enable_dsc_when_not_needed(
6226 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6228 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6229 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6231 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6233 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6234 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6235 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6237 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6238 max_dsc_target_bpp_limit_override,
6239 link_bandwidth_kbps,
6241 &stream->timing.dsc_cfg)) {
6242 stream->timing.flags.DSC = 1;
6243 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6244 __func__, drm_connector->name);
6246 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6247 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6248 max_supported_bw_in_kbps = link_bandwidth_kbps;
6249 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6251 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6252 max_supported_bw_in_kbps > 0 &&
6253 dsc_max_supported_bw_in_kbps > 0)
6254 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6256 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6257 max_dsc_target_bpp_limit_override,
6258 dsc_max_supported_bw_in_kbps,
6260 &stream->timing.dsc_cfg)) {
6261 stream->timing.flags.DSC = 1;
6262 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6263 __func__, drm_connector->name);
6268 /* Overwrite the stream flag if DSC is enabled through debugfs */
6269 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6270 stream->timing.flags.DSC = 1;
6272 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6273 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6275 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6276 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6278 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6279 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6281 #endif /* CONFIG_DRM_AMD_DC_DCN */
6284 * DOC: FreeSync Video
6286 * When a userspace application wants to play a video, the content follows a
6287 * standard format definition that usually specifies the FPS for that format.
6288 * The below list illustrates some video format and the expected FPS,
6291 * - TV/NTSC (23.976 FPS)
6294 * - TV/NTSC (29.97 FPS)
6295 * - TV/NTSC (30 FPS)
6296 * - Cinema HFR (48 FPS)
6298 * - Commonly used (60 FPS)
6299 * - Multiples of 24 (48,72,96,120 FPS)
6301 * The list of standards video format is not huge and can be added to the
6302 * connector modeset list beforehand. With that, userspace can leverage
6303 * FreeSync to extends the front porch in order to attain the target refresh
6304 * rate. Such a switch will happen seamlessly, without screen blanking or
6305 * reprogramming of the output in any other way. If the userspace requests a
6306 * modesetting change compatible with FreeSync modes that only differ in the
6307 * refresh rate, DC will skip the full update and avoid blink during the
6308 * transition. For example, the video player can change the modesetting from
6309 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6310 * causing any display blink. This same concept can be applied to a mode
6313 static struct drm_display_mode *
6314 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6315 bool use_probed_modes)
6317 struct drm_display_mode *m, *m_pref = NULL;
6318 u16 current_refresh, highest_refresh;
6319 struct list_head *list_head = use_probed_modes ?
6320 &aconnector->base.probed_modes :
6321 &aconnector->base.modes;
6323 if (aconnector->freesync_vid_base.clock != 0)
6324 return &aconnector->freesync_vid_base;
6326 /* Find the preferred mode */
6327 list_for_each_entry (m, list_head, head) {
6328 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6335 /* Probably an EDID with no preferred mode. Fallback to first entry */
6336 m_pref = list_first_entry_or_null(
6337 &aconnector->base.modes, struct drm_display_mode, head);
6339 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6344 highest_refresh = drm_mode_vrefresh(m_pref);
6347 * Find the mode with highest refresh rate with same resolution.
6348 * For some monitors, preferred mode is not the mode with highest
6349 * supported refresh rate.
6351 list_for_each_entry (m, list_head, head) {
6352 current_refresh = drm_mode_vrefresh(m);
6354 if (m->hdisplay == m_pref->hdisplay &&
6355 m->vdisplay == m_pref->vdisplay &&
6356 highest_refresh < current_refresh) {
6357 highest_refresh = current_refresh;
6362 aconnector->freesync_vid_base = *m_pref;
6366 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6367 struct amdgpu_dm_connector *aconnector)
6369 struct drm_display_mode *high_mode;
6372 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6373 if (!high_mode || !mode)
6376 timing_diff = high_mode->vtotal - mode->vtotal;
6378 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6379 high_mode->hdisplay != mode->hdisplay ||
6380 high_mode->vdisplay != mode->vdisplay ||
6381 high_mode->hsync_start != mode->hsync_start ||
6382 high_mode->hsync_end != mode->hsync_end ||
6383 high_mode->htotal != mode->htotal ||
6384 high_mode->hskew != mode->hskew ||
6385 high_mode->vscan != mode->vscan ||
6386 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6387 high_mode->vsync_end - mode->vsync_end != timing_diff)
6393 static struct dc_stream_state *
6394 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6395 const struct drm_display_mode *drm_mode,
6396 const struct dm_connector_state *dm_state,
6397 const struct dc_stream_state *old_stream,
6400 struct drm_display_mode *preferred_mode = NULL;
6401 struct drm_connector *drm_connector;
6402 const struct drm_connector_state *con_state =
6403 dm_state ? &dm_state->base : NULL;
6404 struct dc_stream_state *stream = NULL;
6405 struct drm_display_mode mode = *drm_mode;
6406 struct drm_display_mode saved_mode;
6407 struct drm_display_mode *freesync_mode = NULL;
6408 bool native_mode_found = false;
6409 bool recalculate_timing = false;
6410 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6412 int preferred_refresh = 0;
6413 #if defined(CONFIG_DRM_AMD_DC_DCN)
6414 struct dsc_dec_dpcd_caps dsc_caps;
6416 struct dc_sink *sink = NULL;
6418 memset(&saved_mode, 0, sizeof(saved_mode));
6420 if (aconnector == NULL) {
6421 DRM_ERROR("aconnector is NULL!\n");
6425 drm_connector = &aconnector->base;
6427 if (!aconnector->dc_sink) {
6428 sink = create_fake_sink(aconnector);
6432 sink = aconnector->dc_sink;
6433 dc_sink_retain(sink);
6436 stream = dc_create_stream_for_sink(sink);
6438 if (stream == NULL) {
6439 DRM_ERROR("Failed to create stream for sink!\n");
6443 stream->dm_stream_context = aconnector;
6445 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6446 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6448 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6449 /* Search for preferred mode */
6450 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6451 native_mode_found = true;
6455 if (!native_mode_found)
6456 preferred_mode = list_first_entry_or_null(
6457 &aconnector->base.modes,
6458 struct drm_display_mode,
6461 mode_refresh = drm_mode_vrefresh(&mode);
6463 if (preferred_mode == NULL) {
6465 * This may not be an error, the use case is when we have no
6466 * usermode calls to reset and set mode upon hotplug. In this
6467 * case, we call set mode ourselves to restore the previous mode
6468 * and the modelist may not be filled in in time.
6470 DRM_DEBUG_DRIVER("No preferred mode found\n");
6472 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6473 if (recalculate_timing) {
6474 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6476 mode = *freesync_mode;
6478 decide_crtc_timing_for_drm_display_mode(
6479 &mode, preferred_mode, scale);
6481 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6485 if (recalculate_timing)
6486 drm_mode_set_crtcinfo(&saved_mode, 0);
6488 drm_mode_set_crtcinfo(&mode, 0);
6491 * If scaling is enabled and refresh rate didn't change
6492 * we copy the vic and polarities of the old timings
6494 if (!scale || mode_refresh != preferred_refresh)
6495 fill_stream_properties_from_drm_display_mode(
6496 stream, &mode, &aconnector->base, con_state, NULL,
6499 fill_stream_properties_from_drm_display_mode(
6500 stream, &mode, &aconnector->base, con_state, old_stream,
6503 #if defined(CONFIG_DRM_AMD_DC_DCN)
6504 /* SST DSC determination policy */
6505 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6506 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6507 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6510 update_stream_scaling_settings(&mode, dm_state, stream);
6513 &stream->audio_info,
6517 update_stream_signal(stream, sink);
6519 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6520 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6522 if (stream->link->psr_settings.psr_feature_enabled) {
6524 // should decide stream support vsc sdp colorimetry capability
6525 // before building vsc info packet
6527 stream->use_vsc_sdp_for_colorimetry = false;
6528 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6529 stream->use_vsc_sdp_for_colorimetry =
6530 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6532 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6533 stream->use_vsc_sdp_for_colorimetry = true;
6535 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6536 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6540 dc_sink_release(sink);
6545 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6547 drm_crtc_cleanup(crtc);
6551 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6552 struct drm_crtc_state *state)
6554 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6556 /* TODO Destroy dc_stream objects are stream object is flattened */
6558 dc_stream_release(cur->stream);
6561 __drm_atomic_helper_crtc_destroy_state(state);
6567 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6569 struct dm_crtc_state *state;
6572 dm_crtc_destroy_state(crtc, crtc->state);
6574 state = kzalloc(sizeof(*state), GFP_KERNEL);
6575 if (WARN_ON(!state))
6578 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6581 static struct drm_crtc_state *
6582 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6584 struct dm_crtc_state *state, *cur;
6586 cur = to_dm_crtc_state(crtc->state);
6588 if (WARN_ON(!crtc->state))
6591 state = kzalloc(sizeof(*state), GFP_KERNEL);
6595 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6598 state->stream = cur->stream;
6599 dc_stream_retain(state->stream);
6602 state->active_planes = cur->active_planes;
6603 state->vrr_infopacket = cur->vrr_infopacket;
6604 state->abm_level = cur->abm_level;
6605 state->vrr_supported = cur->vrr_supported;
6606 state->freesync_config = cur->freesync_config;
6607 state->cm_has_degamma = cur->cm_has_degamma;
6608 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6609 state->force_dpms_off = cur->force_dpms_off;
6610 /* TODO Duplicate dc_stream after objects are stream object is flattened */
6612 return &state->base;
6615 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6616 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6618 crtc_debugfs_init(crtc);
6624 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6626 enum dc_irq_source irq_source;
6627 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6628 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6631 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6633 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6635 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6636 acrtc->crtc_id, enable ? "en" : "dis", rc);
6640 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6642 enum dc_irq_source irq_source;
6643 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6644 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6645 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6646 #if defined(CONFIG_DRM_AMD_DC_DCN)
6647 struct amdgpu_display_manager *dm = &adev->dm;
6648 struct vblank_control_work *work;
6653 /* vblank irq on -> Only need vupdate irq in vrr mode */
6654 if (amdgpu_dm_vrr_active(acrtc_state))
6655 rc = dm_set_vupdate_irq(crtc, true);
6657 /* vblank irq off -> vupdate irq off */
6658 rc = dm_set_vupdate_irq(crtc, false);
6664 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6666 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6669 if (amdgpu_in_reset(adev))
6672 #if defined(CONFIG_DRM_AMD_DC_DCN)
6673 if (dm->vblank_control_workqueue) {
6674 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6678 INIT_WORK(&work->work, vblank_control_worker);
6680 work->acrtc = acrtc;
6681 work->enable = enable;
6683 if (acrtc_state->stream) {
6684 dc_stream_retain(acrtc_state->stream);
6685 work->stream = acrtc_state->stream;
6688 queue_work(dm->vblank_control_workqueue, &work->work);
6695 static int dm_enable_vblank(struct drm_crtc *crtc)
6697 return dm_set_vblank(crtc, true);
6700 static void dm_disable_vblank(struct drm_crtc *crtc)
6702 dm_set_vblank(crtc, false);
6705 /* Implemented only the options currently availible for the driver */
6706 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6707 .reset = dm_crtc_reset_state,
6708 .destroy = amdgpu_dm_crtc_destroy,
6709 .set_config = drm_atomic_helper_set_config,
6710 .page_flip = drm_atomic_helper_page_flip,
6711 .atomic_duplicate_state = dm_crtc_duplicate_state,
6712 .atomic_destroy_state = dm_crtc_destroy_state,
6713 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6714 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6715 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6716 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6717 .enable_vblank = dm_enable_vblank,
6718 .disable_vblank = dm_disable_vblank,
6719 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6720 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6721 .late_register = amdgpu_dm_crtc_late_register,
6725 static enum drm_connector_status
6726 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6729 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6733 * 1. This interface is NOT called in context of HPD irq.
6734 * 2. This interface *is called* in context of user-mode ioctl. Which
6735 * makes it a bad place for *any* MST-related activity.
6738 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6739 !aconnector->fake_enable)
6740 connected = (aconnector->dc_sink != NULL);
6742 connected = (aconnector->base.force == DRM_FORCE_ON);
6744 update_subconnector_property(aconnector);
6746 return (connected ? connector_status_connected :
6747 connector_status_disconnected);
6750 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6751 struct drm_connector_state *connector_state,
6752 struct drm_property *property,
6755 struct drm_device *dev = connector->dev;
6756 struct amdgpu_device *adev = drm_to_adev(dev);
6757 struct dm_connector_state *dm_old_state =
6758 to_dm_connector_state(connector->state);
6759 struct dm_connector_state *dm_new_state =
6760 to_dm_connector_state(connector_state);
6764 if (property == dev->mode_config.scaling_mode_property) {
6765 enum amdgpu_rmx_type rmx_type;
6768 case DRM_MODE_SCALE_CENTER:
6769 rmx_type = RMX_CENTER;
6771 case DRM_MODE_SCALE_ASPECT:
6772 rmx_type = RMX_ASPECT;
6774 case DRM_MODE_SCALE_FULLSCREEN:
6775 rmx_type = RMX_FULL;
6777 case DRM_MODE_SCALE_NONE:
6783 if (dm_old_state->scaling == rmx_type)
6786 dm_new_state->scaling = rmx_type;
6788 } else if (property == adev->mode_info.underscan_hborder_property) {
6789 dm_new_state->underscan_hborder = val;
6791 } else if (property == adev->mode_info.underscan_vborder_property) {
6792 dm_new_state->underscan_vborder = val;
6794 } else if (property == adev->mode_info.underscan_property) {
6795 dm_new_state->underscan_enable = val;
6797 } else if (property == adev->mode_info.abm_level_property) {
6798 dm_new_state->abm_level = val;
6805 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6806 const struct drm_connector_state *state,
6807 struct drm_property *property,
6810 struct drm_device *dev = connector->dev;
6811 struct amdgpu_device *adev = drm_to_adev(dev);
6812 struct dm_connector_state *dm_state =
6813 to_dm_connector_state(state);
6816 if (property == dev->mode_config.scaling_mode_property) {
6817 switch (dm_state->scaling) {
6819 *val = DRM_MODE_SCALE_CENTER;
6822 *val = DRM_MODE_SCALE_ASPECT;
6825 *val = DRM_MODE_SCALE_FULLSCREEN;
6829 *val = DRM_MODE_SCALE_NONE;
6833 } else if (property == adev->mode_info.underscan_hborder_property) {
6834 *val = dm_state->underscan_hborder;
6836 } else if (property == adev->mode_info.underscan_vborder_property) {
6837 *val = dm_state->underscan_vborder;
6839 } else if (property == adev->mode_info.underscan_property) {
6840 *val = dm_state->underscan_enable;
6842 } else if (property == adev->mode_info.abm_level_property) {
6843 *val = dm_state->abm_level;
6850 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6852 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6854 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6857 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6859 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6860 const struct dc_link *link = aconnector->dc_link;
6861 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6862 struct amdgpu_display_manager *dm = &adev->dm;
6866 * Call only if mst_mgr was iniitalized before since it's not done
6867 * for all connector types.
6869 if (aconnector->mst_mgr.dev)
6870 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6872 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6873 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6874 for (i = 0; i < dm->num_of_edps; i++) {
6875 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6876 backlight_device_unregister(dm->backlight_dev[i]);
6877 dm->backlight_dev[i] = NULL;
6882 if (aconnector->dc_em_sink)
6883 dc_sink_release(aconnector->dc_em_sink);
6884 aconnector->dc_em_sink = NULL;
6885 if (aconnector->dc_sink)
6886 dc_sink_release(aconnector->dc_sink);
6887 aconnector->dc_sink = NULL;
6889 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6890 drm_connector_unregister(connector);
6891 drm_connector_cleanup(connector);
6892 if (aconnector->i2c) {
6893 i2c_del_adapter(&aconnector->i2c->base);
6894 kfree(aconnector->i2c);
6896 kfree(aconnector->dm_dp_aux.aux.name);
6901 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6903 struct dm_connector_state *state =
6904 to_dm_connector_state(connector->state);
6906 if (connector->state)
6907 __drm_atomic_helper_connector_destroy_state(connector->state);
6911 state = kzalloc(sizeof(*state), GFP_KERNEL);
6914 state->scaling = RMX_OFF;
6915 state->underscan_enable = false;
6916 state->underscan_hborder = 0;
6917 state->underscan_vborder = 0;
6918 state->base.max_requested_bpc = 8;
6919 state->vcpi_slots = 0;
6921 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6922 state->abm_level = amdgpu_dm_abm_level;
6924 __drm_atomic_helper_connector_reset(connector, &state->base);
6928 struct drm_connector_state *
6929 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6931 struct dm_connector_state *state =
6932 to_dm_connector_state(connector->state);
6934 struct dm_connector_state *new_state =
6935 kmemdup(state, sizeof(*state), GFP_KERNEL);
6940 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6942 new_state->freesync_capable = state->freesync_capable;
6943 new_state->abm_level = state->abm_level;
6944 new_state->scaling = state->scaling;
6945 new_state->underscan_enable = state->underscan_enable;
6946 new_state->underscan_hborder = state->underscan_hborder;
6947 new_state->underscan_vborder = state->underscan_vborder;
6948 new_state->vcpi_slots = state->vcpi_slots;
6949 new_state->pbn = state->pbn;
6950 return &new_state->base;
6954 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6956 struct amdgpu_dm_connector *amdgpu_dm_connector =
6957 to_amdgpu_dm_connector(connector);
6960 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6961 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6962 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6963 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6968 #if defined(CONFIG_DEBUG_FS)
6969 connector_debugfs_init(amdgpu_dm_connector);
6975 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6976 .reset = amdgpu_dm_connector_funcs_reset,
6977 .detect = amdgpu_dm_connector_detect,
6978 .fill_modes = drm_helper_probe_single_connector_modes,
6979 .destroy = amdgpu_dm_connector_destroy,
6980 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6981 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6982 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6983 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6984 .late_register = amdgpu_dm_connector_late_register,
6985 .early_unregister = amdgpu_dm_connector_unregister
6988 static int get_modes(struct drm_connector *connector)
6990 return amdgpu_dm_connector_get_modes(connector);
6993 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6995 struct dc_sink_init_data init_params = {
6996 .link = aconnector->dc_link,
6997 .sink_signal = SIGNAL_TYPE_VIRTUAL
7001 if (!aconnector->base.edid_blob_ptr) {
7002 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
7003 aconnector->base.name);
7005 aconnector->base.force = DRM_FORCE_OFF;
7006 aconnector->base.override_edid = false;
7010 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7012 aconnector->edid = edid;
7014 aconnector->dc_em_sink = dc_link_add_remote_sink(
7015 aconnector->dc_link,
7017 (edid->extensions + 1) * EDID_LENGTH,
7020 if (aconnector->base.force == DRM_FORCE_ON) {
7021 aconnector->dc_sink = aconnector->dc_link->local_sink ?
7022 aconnector->dc_link->local_sink :
7023 aconnector->dc_em_sink;
7024 dc_sink_retain(aconnector->dc_sink);
7028 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7030 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7033 * In case of headless boot with force on for DP managed connector
7034 * Those settings have to be != 0 to get initial modeset
7036 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7037 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7038 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7042 aconnector->base.override_edid = true;
7043 create_eml_sink(aconnector);
7046 struct dc_stream_state *
7047 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7048 const struct drm_display_mode *drm_mode,
7049 const struct dm_connector_state *dm_state,
7050 const struct dc_stream_state *old_stream)
7052 struct drm_connector *connector = &aconnector->base;
7053 struct amdgpu_device *adev = drm_to_adev(connector->dev);
7054 struct dc_stream_state *stream;
7055 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7056 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7057 enum dc_status dc_result = DC_OK;
7060 stream = create_stream_for_sink(aconnector, drm_mode,
7061 dm_state, old_stream,
7063 if (stream == NULL) {
7064 DRM_ERROR("Failed to create stream for sink!\n");
7068 dc_result = dc_validate_stream(adev->dm.dc, stream);
7070 if (dc_result != DC_OK) {
7071 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7076 dc_status_to_str(dc_result));
7078 dc_stream_release(stream);
7080 requested_bpc -= 2; /* lower bpc to retry validation */
7083 } while (stream == NULL && requested_bpc >= 6);
7085 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7086 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7088 aconnector->force_yuv420_output = true;
7089 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7090 dm_state, old_stream);
7091 aconnector->force_yuv420_output = false;
7097 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7098 struct drm_display_mode *mode)
7100 int result = MODE_ERROR;
7101 struct dc_sink *dc_sink;
7102 /* TODO: Unhardcode stream count */
7103 struct dc_stream_state *stream;
7104 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7106 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7107 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7111 * Only run this the first time mode_valid is called to initilialize
7114 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7115 !aconnector->dc_em_sink)
7116 handle_edid_mgmt(aconnector);
7118 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7120 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7121 aconnector->base.force != DRM_FORCE_ON) {
7122 DRM_ERROR("dc_sink is NULL!\n");
7126 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7128 dc_stream_release(stream);
7133 /* TODO: error handling*/
7137 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7138 struct dc_info_packet *out)
7140 struct hdmi_drm_infoframe frame;
7141 unsigned char buf[30]; /* 26 + 4 */
7145 memset(out, 0, sizeof(*out));
7147 if (!state->hdr_output_metadata)
7150 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7154 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7158 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7162 /* Prepare the infopacket for DC. */
7163 switch (state->connector->connector_type) {
7164 case DRM_MODE_CONNECTOR_HDMIA:
7165 out->hb0 = 0x87; /* type */
7166 out->hb1 = 0x01; /* version */
7167 out->hb2 = 0x1A; /* length */
7168 out->sb[0] = buf[3]; /* checksum */
7172 case DRM_MODE_CONNECTOR_DisplayPort:
7173 case DRM_MODE_CONNECTOR_eDP:
7174 out->hb0 = 0x00; /* sdp id, zero */
7175 out->hb1 = 0x87; /* type */
7176 out->hb2 = 0x1D; /* payload len - 1 */
7177 out->hb3 = (0x13 << 2); /* sdp version */
7178 out->sb[0] = 0x01; /* version */
7179 out->sb[1] = 0x1A; /* length */
7187 memcpy(&out->sb[i], &buf[4], 26);
7190 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7191 sizeof(out->sb), false);
7197 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7198 struct drm_atomic_state *state)
7200 struct drm_connector_state *new_con_state =
7201 drm_atomic_get_new_connector_state(state, conn);
7202 struct drm_connector_state *old_con_state =
7203 drm_atomic_get_old_connector_state(state, conn);
7204 struct drm_crtc *crtc = new_con_state->crtc;
7205 struct drm_crtc_state *new_crtc_state;
7208 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7213 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7214 struct dc_info_packet hdr_infopacket;
7216 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7220 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7221 if (IS_ERR(new_crtc_state))
7222 return PTR_ERR(new_crtc_state);
7225 * DC considers the stream backends changed if the
7226 * static metadata changes. Forcing the modeset also
7227 * gives a simple way for userspace to switch from
7228 * 8bpc to 10bpc when setting the metadata to enter
7231 * Changing the static metadata after it's been
7232 * set is permissible, however. So only force a
7233 * modeset if we're entering or exiting HDR.
7235 new_crtc_state->mode_changed =
7236 !old_con_state->hdr_output_metadata ||
7237 !new_con_state->hdr_output_metadata;
7243 static const struct drm_connector_helper_funcs
7244 amdgpu_dm_connector_helper_funcs = {
7246 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7247 * modes will be filtered by drm_mode_validate_size(), and those modes
7248 * are missing after user start lightdm. So we need to renew modes list.
7249 * in get_modes call back, not just return the modes count
7251 .get_modes = get_modes,
7252 .mode_valid = amdgpu_dm_connector_mode_valid,
7253 .atomic_check = amdgpu_dm_connector_atomic_check,
7256 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7260 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7262 struct drm_atomic_state *state = new_crtc_state->state;
7263 struct drm_plane *plane;
7266 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7267 struct drm_plane_state *new_plane_state;
7269 /* Cursor planes are "fake". */
7270 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7273 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7275 if (!new_plane_state) {
7277 * The plane is enable on the CRTC and hasn't changed
7278 * state. This means that it previously passed
7279 * validation and is therefore enabled.
7285 /* We need a framebuffer to be considered enabled. */
7286 num_active += (new_plane_state->fb != NULL);
7292 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7293 struct drm_crtc_state *new_crtc_state)
7295 struct dm_crtc_state *dm_new_crtc_state =
7296 to_dm_crtc_state(new_crtc_state);
7298 dm_new_crtc_state->active_planes = 0;
7300 if (!dm_new_crtc_state->stream)
7303 dm_new_crtc_state->active_planes =
7304 count_crtc_active_planes(new_crtc_state);
7307 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7308 struct drm_atomic_state *state)
7310 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7312 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7313 struct dc *dc = adev->dm.dc;
7314 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7317 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7319 dm_update_crtc_active_planes(crtc, crtc_state);
7321 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7322 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7327 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7328 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7329 * planes are disabled, which is not supported by the hardware. And there is legacy
7330 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7332 if (crtc_state->enable &&
7333 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7334 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7338 /* In some use cases, like reset, no stream is attached */
7339 if (!dm_crtc_state->stream)
7342 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7345 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7349 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7350 const struct drm_display_mode *mode,
7351 struct drm_display_mode *adjusted_mode)
7356 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7357 .disable = dm_crtc_helper_disable,
7358 .atomic_check = dm_crtc_helper_atomic_check,
7359 .mode_fixup = dm_crtc_helper_mode_fixup,
7360 .get_scanout_position = amdgpu_crtc_get_scanout_position,
7363 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7368 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7370 switch (display_color_depth) {
7371 case COLOR_DEPTH_666:
7373 case COLOR_DEPTH_888:
7375 case COLOR_DEPTH_101010:
7377 case COLOR_DEPTH_121212:
7379 case COLOR_DEPTH_141414:
7381 case COLOR_DEPTH_161616:
7389 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7390 struct drm_crtc_state *crtc_state,
7391 struct drm_connector_state *conn_state)
7393 struct drm_atomic_state *state = crtc_state->state;
7394 struct drm_connector *connector = conn_state->connector;
7395 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7396 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7397 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7398 struct drm_dp_mst_topology_mgr *mst_mgr;
7399 struct drm_dp_mst_port *mst_port;
7400 enum dc_color_depth color_depth;
7402 bool is_y420 = false;
7404 if (!aconnector->port || !aconnector->dc_sink)
7407 mst_port = aconnector->port;
7408 mst_mgr = &aconnector->mst_port->mst_mgr;
7410 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7413 if (!state->duplicated) {
7414 int max_bpc = conn_state->max_requested_bpc;
7415 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7416 aconnector->force_yuv420_output;
7417 color_depth = convert_color_depth_from_display_info(connector,
7420 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7421 clock = adjusted_mode->clock;
7422 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7424 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7427 dm_new_connector_state->pbn,
7428 dm_mst_get_pbn_divider(aconnector->dc_link));
7429 if (dm_new_connector_state->vcpi_slots < 0) {
7430 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7431 return dm_new_connector_state->vcpi_slots;
7436 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7437 .disable = dm_encoder_helper_disable,
7438 .atomic_check = dm_encoder_helper_atomic_check
7441 #if defined(CONFIG_DRM_AMD_DC_DCN)
7442 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7443 struct dc_state *dc_state,
7444 struct dsc_mst_fairness_vars *vars)
7446 struct dc_stream_state *stream = NULL;
7447 struct drm_connector *connector;
7448 struct drm_connector_state *new_con_state;
7449 struct amdgpu_dm_connector *aconnector;
7450 struct dm_connector_state *dm_conn_state;
7452 int vcpi, pbn_div, pbn, slot_num = 0;
7454 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7456 aconnector = to_amdgpu_dm_connector(connector);
7458 if (!aconnector->port)
7461 if (!new_con_state || !new_con_state->crtc)
7464 dm_conn_state = to_dm_connector_state(new_con_state);
7466 for (j = 0; j < dc_state->stream_count; j++) {
7467 stream = dc_state->streams[j];
7471 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7480 pbn_div = dm_mst_get_pbn_divider(stream->link);
7481 /* pbn is calculated by compute_mst_dsc_configs_for_state*/
7482 for (j = 0; j < dc_state->stream_count; j++) {
7483 if (vars[j].aconnector == aconnector) {
7489 if (j == dc_state->stream_count)
7492 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7494 if (stream->timing.flags.DSC != 1) {
7495 dm_conn_state->pbn = pbn;
7496 dm_conn_state->vcpi_slots = slot_num;
7498 drm_dp_mst_atomic_enable_dsc(state,
7506 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7513 dm_conn_state->pbn = pbn;
7514 dm_conn_state->vcpi_slots = vcpi;
7520 static void dm_drm_plane_reset(struct drm_plane *plane)
7522 struct dm_plane_state *amdgpu_state = NULL;
7525 plane->funcs->atomic_destroy_state(plane, plane->state);
7527 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7528 WARN_ON(amdgpu_state == NULL);
7531 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7534 static struct drm_plane_state *
7535 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7537 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7539 old_dm_plane_state = to_dm_plane_state(plane->state);
7540 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7541 if (!dm_plane_state)
7544 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7546 if (old_dm_plane_state->dc_state) {
7547 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7548 dc_plane_state_retain(dm_plane_state->dc_state);
7551 return &dm_plane_state->base;
7554 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7555 struct drm_plane_state *state)
7557 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7559 if (dm_plane_state->dc_state)
7560 dc_plane_state_release(dm_plane_state->dc_state);
7562 drm_atomic_helper_plane_destroy_state(plane, state);
7565 static const struct drm_plane_funcs dm_plane_funcs = {
7566 .update_plane = drm_atomic_helper_update_plane,
7567 .disable_plane = drm_atomic_helper_disable_plane,
7568 .destroy = drm_primary_helper_destroy,
7569 .reset = dm_drm_plane_reset,
7570 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7571 .atomic_destroy_state = dm_drm_plane_destroy_state,
7572 .format_mod_supported = dm_plane_format_mod_supported,
7575 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7576 struct drm_plane_state *new_state)
7578 struct amdgpu_framebuffer *afb;
7579 struct drm_gem_object *obj;
7580 struct amdgpu_device *adev;
7581 struct amdgpu_bo *rbo;
7582 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7583 struct list_head list;
7584 struct ttm_validate_buffer tv;
7585 struct ww_acquire_ctx ticket;
7589 if (!new_state->fb) {
7590 DRM_DEBUG_KMS("No FB bound\n");
7594 afb = to_amdgpu_framebuffer(new_state->fb);
7595 obj = new_state->fb->obj[0];
7596 rbo = gem_to_amdgpu_bo(obj);
7597 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7598 INIT_LIST_HEAD(&list);
7602 list_add(&tv.head, &list);
7604 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7606 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7610 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7611 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7613 domain = AMDGPU_GEM_DOMAIN_VRAM;
7615 r = amdgpu_bo_pin(rbo, domain);
7616 if (unlikely(r != 0)) {
7617 if (r != -ERESTARTSYS)
7618 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7619 ttm_eu_backoff_reservation(&ticket, &list);
7623 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7624 if (unlikely(r != 0)) {
7625 amdgpu_bo_unpin(rbo);
7626 ttm_eu_backoff_reservation(&ticket, &list);
7627 DRM_ERROR("%p bind failed\n", rbo);
7631 ttm_eu_backoff_reservation(&ticket, &list);
7633 afb->address = amdgpu_bo_gpu_offset(rbo);
7638 * We don't do surface updates on planes that have been newly created,
7639 * but we also don't have the afb->address during atomic check.
7641 * Fill in buffer attributes depending on the address here, but only on
7642 * newly created planes since they're not being used by DC yet and this
7643 * won't modify global state.
7645 dm_plane_state_old = to_dm_plane_state(plane->state);
7646 dm_plane_state_new = to_dm_plane_state(new_state);
7648 if (dm_plane_state_new->dc_state &&
7649 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7650 struct dc_plane_state *plane_state =
7651 dm_plane_state_new->dc_state;
7652 bool force_disable_dcc = !plane_state->dcc.enable;
7654 fill_plane_buffer_attributes(
7655 adev, afb, plane_state->format, plane_state->rotation,
7657 &plane_state->tiling_info, &plane_state->plane_size,
7658 &plane_state->dcc, &plane_state->address,
7659 afb->tmz_surface, force_disable_dcc);
7665 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7666 struct drm_plane_state *old_state)
7668 struct amdgpu_bo *rbo;
7674 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7675 r = amdgpu_bo_reserve(rbo, false);
7677 DRM_ERROR("failed to reserve rbo before unpin\n");
7681 amdgpu_bo_unpin(rbo);
7682 amdgpu_bo_unreserve(rbo);
7683 amdgpu_bo_unref(&rbo);
7686 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7687 struct drm_crtc_state *new_crtc_state)
7689 struct drm_framebuffer *fb = state->fb;
7690 int min_downscale, max_upscale;
7692 int max_scale = INT_MAX;
7694 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7695 if (fb && state->crtc) {
7696 /* Validate viewport to cover the case when only the position changes */
7697 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7698 int viewport_width = state->crtc_w;
7699 int viewport_height = state->crtc_h;
7701 if (state->crtc_x < 0)
7702 viewport_width += state->crtc_x;
7703 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7704 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7706 if (state->crtc_y < 0)
7707 viewport_height += state->crtc_y;
7708 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7709 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7711 if (viewport_width < 0 || viewport_height < 0) {
7712 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7714 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7715 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7717 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7718 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7724 /* Get min/max allowed scaling factors from plane caps. */
7725 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7726 &min_downscale, &max_upscale);
7728 * Convert to drm convention: 16.16 fixed point, instead of dc's
7729 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7730 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7732 min_scale = (1000 << 16) / max_upscale;
7733 max_scale = (1000 << 16) / min_downscale;
7736 return drm_atomic_helper_check_plane_state(
7737 state, new_crtc_state, min_scale, max_scale, true, true);
7740 static int dm_plane_atomic_check(struct drm_plane *plane,
7741 struct drm_atomic_state *state)
7743 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7745 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7746 struct dc *dc = adev->dm.dc;
7747 struct dm_plane_state *dm_plane_state;
7748 struct dc_scaling_info scaling_info;
7749 struct drm_crtc_state *new_crtc_state;
7752 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7754 dm_plane_state = to_dm_plane_state(new_plane_state);
7756 if (!dm_plane_state->dc_state)
7760 drm_atomic_get_new_crtc_state(state,
7761 new_plane_state->crtc);
7762 if (!new_crtc_state)
7765 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7769 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7773 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7779 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7780 struct drm_atomic_state *state)
7782 /* Only support async updates on cursor planes. */
7783 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7789 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7790 struct drm_atomic_state *state)
7792 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7794 struct drm_plane_state *old_state =
7795 drm_atomic_get_old_plane_state(state, plane);
7797 trace_amdgpu_dm_atomic_update_cursor(new_state);
7799 swap(plane->state->fb, new_state->fb);
7801 plane->state->src_x = new_state->src_x;
7802 plane->state->src_y = new_state->src_y;
7803 plane->state->src_w = new_state->src_w;
7804 plane->state->src_h = new_state->src_h;
7805 plane->state->crtc_x = new_state->crtc_x;
7806 plane->state->crtc_y = new_state->crtc_y;
7807 plane->state->crtc_w = new_state->crtc_w;
7808 plane->state->crtc_h = new_state->crtc_h;
7810 handle_cursor_update(plane, old_state);
7813 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7814 .prepare_fb = dm_plane_helper_prepare_fb,
7815 .cleanup_fb = dm_plane_helper_cleanup_fb,
7816 .atomic_check = dm_plane_atomic_check,
7817 .atomic_async_check = dm_plane_atomic_async_check,
7818 .atomic_async_update = dm_plane_atomic_async_update
7822 * TODO: these are currently initialized to rgb formats only.
7823 * For future use cases we should either initialize them dynamically based on
7824 * plane capabilities, or initialize this array to all formats, so internal drm
7825 * check will succeed, and let DC implement proper check
7827 static const uint32_t rgb_formats[] = {
7828 DRM_FORMAT_XRGB8888,
7829 DRM_FORMAT_ARGB8888,
7830 DRM_FORMAT_RGBA8888,
7831 DRM_FORMAT_XRGB2101010,
7832 DRM_FORMAT_XBGR2101010,
7833 DRM_FORMAT_ARGB2101010,
7834 DRM_FORMAT_ABGR2101010,
7835 DRM_FORMAT_XRGB16161616,
7836 DRM_FORMAT_XBGR16161616,
7837 DRM_FORMAT_ARGB16161616,
7838 DRM_FORMAT_ABGR16161616,
7839 DRM_FORMAT_XBGR8888,
7840 DRM_FORMAT_ABGR8888,
7844 static const uint32_t overlay_formats[] = {
7845 DRM_FORMAT_XRGB8888,
7846 DRM_FORMAT_ARGB8888,
7847 DRM_FORMAT_RGBA8888,
7848 DRM_FORMAT_XBGR8888,
7849 DRM_FORMAT_ABGR8888,
7853 static const u32 cursor_formats[] = {
7857 static int get_plane_formats(const struct drm_plane *plane,
7858 const struct dc_plane_cap *plane_cap,
7859 uint32_t *formats, int max_formats)
7861 int i, num_formats = 0;
7864 * TODO: Query support for each group of formats directly from
7865 * DC plane caps. This will require adding more formats to the
7869 switch (plane->type) {
7870 case DRM_PLANE_TYPE_PRIMARY:
7871 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7872 if (num_formats >= max_formats)
7875 formats[num_formats++] = rgb_formats[i];
7878 if (plane_cap && plane_cap->pixel_format_support.nv12)
7879 formats[num_formats++] = DRM_FORMAT_NV12;
7880 if (plane_cap && plane_cap->pixel_format_support.p010)
7881 formats[num_formats++] = DRM_FORMAT_P010;
7882 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7883 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7884 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7885 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7886 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7890 case DRM_PLANE_TYPE_OVERLAY:
7891 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7892 if (num_formats >= max_formats)
7895 formats[num_formats++] = overlay_formats[i];
7899 case DRM_PLANE_TYPE_CURSOR:
7900 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7901 if (num_formats >= max_formats)
7904 formats[num_formats++] = cursor_formats[i];
7912 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7913 struct drm_plane *plane,
7914 unsigned long possible_crtcs,
7915 const struct dc_plane_cap *plane_cap)
7917 uint32_t formats[32];
7920 unsigned int supported_rotations;
7921 uint64_t *modifiers = NULL;
7923 num_formats = get_plane_formats(plane, plane_cap, formats,
7924 ARRAY_SIZE(formats));
7926 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7930 if (modifiers == NULL)
7931 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7933 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7934 &dm_plane_funcs, formats, num_formats,
7935 modifiers, plane->type, NULL);
7940 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7941 plane_cap && plane_cap->per_pixel_alpha) {
7942 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7943 BIT(DRM_MODE_BLEND_PREMULTI);
7945 drm_plane_create_alpha_property(plane);
7946 drm_plane_create_blend_mode_property(plane, blend_caps);
7949 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7951 (plane_cap->pixel_format_support.nv12 ||
7952 plane_cap->pixel_format_support.p010)) {
7953 /* This only affects YUV formats. */
7954 drm_plane_create_color_properties(
7956 BIT(DRM_COLOR_YCBCR_BT601) |
7957 BIT(DRM_COLOR_YCBCR_BT709) |
7958 BIT(DRM_COLOR_YCBCR_BT2020),
7959 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7960 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7961 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7964 supported_rotations =
7965 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7966 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7968 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7969 plane->type != DRM_PLANE_TYPE_CURSOR)
7970 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7971 supported_rotations);
7973 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7975 /* Create (reset) the plane state */
7976 if (plane->funcs->reset)
7977 plane->funcs->reset(plane);
7982 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7983 struct drm_plane *plane,
7984 uint32_t crtc_index)
7986 struct amdgpu_crtc *acrtc = NULL;
7987 struct drm_plane *cursor_plane;
7991 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7995 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7996 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7998 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8002 res = drm_crtc_init_with_planes(
8007 &amdgpu_dm_crtc_funcs, NULL);
8012 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8014 /* Create (reset) the plane state */
8015 if (acrtc->base.funcs->reset)
8016 acrtc->base.funcs->reset(&acrtc->base);
8018 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8019 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8021 acrtc->crtc_id = crtc_index;
8022 acrtc->base.enabled = false;
8023 acrtc->otg_inst = -1;
8025 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8026 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8027 true, MAX_COLOR_LUT_ENTRIES);
8028 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8034 kfree(cursor_plane);
8039 static int to_drm_connector_type(enum signal_type st)
8042 case SIGNAL_TYPE_HDMI_TYPE_A:
8043 return DRM_MODE_CONNECTOR_HDMIA;
8044 case SIGNAL_TYPE_EDP:
8045 return DRM_MODE_CONNECTOR_eDP;
8046 case SIGNAL_TYPE_LVDS:
8047 return DRM_MODE_CONNECTOR_LVDS;
8048 case SIGNAL_TYPE_RGB:
8049 return DRM_MODE_CONNECTOR_VGA;
8050 case SIGNAL_TYPE_DISPLAY_PORT:
8051 case SIGNAL_TYPE_DISPLAY_PORT_MST:
8052 return DRM_MODE_CONNECTOR_DisplayPort;
8053 case SIGNAL_TYPE_DVI_DUAL_LINK:
8054 case SIGNAL_TYPE_DVI_SINGLE_LINK:
8055 return DRM_MODE_CONNECTOR_DVID;
8056 case SIGNAL_TYPE_VIRTUAL:
8057 return DRM_MODE_CONNECTOR_VIRTUAL;
8060 return DRM_MODE_CONNECTOR_Unknown;
8064 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8066 struct drm_encoder *encoder;
8068 /* There is only one encoder per connector */
8069 drm_connector_for_each_possible_encoder(connector, encoder)
8075 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8077 struct drm_encoder *encoder;
8078 struct amdgpu_encoder *amdgpu_encoder;
8080 encoder = amdgpu_dm_connector_to_encoder(connector);
8082 if (encoder == NULL)
8085 amdgpu_encoder = to_amdgpu_encoder(encoder);
8087 amdgpu_encoder->native_mode.clock = 0;
8089 if (!list_empty(&connector->probed_modes)) {
8090 struct drm_display_mode *preferred_mode = NULL;
8092 list_for_each_entry(preferred_mode,
8093 &connector->probed_modes,
8095 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8096 amdgpu_encoder->native_mode = *preferred_mode;
8104 static struct drm_display_mode *
8105 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8107 int hdisplay, int vdisplay)
8109 struct drm_device *dev = encoder->dev;
8110 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8111 struct drm_display_mode *mode = NULL;
8112 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8114 mode = drm_mode_duplicate(dev, native_mode);
8119 mode->hdisplay = hdisplay;
8120 mode->vdisplay = vdisplay;
8121 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8122 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8128 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8129 struct drm_connector *connector)
8131 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8132 struct drm_display_mode *mode = NULL;
8133 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8134 struct amdgpu_dm_connector *amdgpu_dm_connector =
8135 to_amdgpu_dm_connector(connector);
8139 char name[DRM_DISPLAY_MODE_LEN];
8142 } common_modes[] = {
8143 { "640x480", 640, 480},
8144 { "800x600", 800, 600},
8145 { "1024x768", 1024, 768},
8146 { "1280x720", 1280, 720},
8147 { "1280x800", 1280, 800},
8148 {"1280x1024", 1280, 1024},
8149 { "1440x900", 1440, 900},
8150 {"1680x1050", 1680, 1050},
8151 {"1600x1200", 1600, 1200},
8152 {"1920x1080", 1920, 1080},
8153 {"1920x1200", 1920, 1200}
8156 n = ARRAY_SIZE(common_modes);
8158 for (i = 0; i < n; i++) {
8159 struct drm_display_mode *curmode = NULL;
8160 bool mode_existed = false;
8162 if (common_modes[i].w > native_mode->hdisplay ||
8163 common_modes[i].h > native_mode->vdisplay ||
8164 (common_modes[i].w == native_mode->hdisplay &&
8165 common_modes[i].h == native_mode->vdisplay))
8168 list_for_each_entry(curmode, &connector->probed_modes, head) {
8169 if (common_modes[i].w == curmode->hdisplay &&
8170 common_modes[i].h == curmode->vdisplay) {
8171 mode_existed = true;
8179 mode = amdgpu_dm_create_common_mode(encoder,
8180 common_modes[i].name, common_modes[i].w,
8185 drm_mode_probed_add(connector, mode);
8186 amdgpu_dm_connector->num_modes++;
8190 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8192 struct drm_encoder *encoder;
8193 struct amdgpu_encoder *amdgpu_encoder;
8194 const struct drm_display_mode *native_mode;
8196 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8197 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8200 encoder = amdgpu_dm_connector_to_encoder(connector);
8204 amdgpu_encoder = to_amdgpu_encoder(encoder);
8206 native_mode = &amdgpu_encoder->native_mode;
8207 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8210 drm_connector_set_panel_orientation_with_quirk(connector,
8211 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8212 native_mode->hdisplay,
8213 native_mode->vdisplay);
8216 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8219 struct amdgpu_dm_connector *amdgpu_dm_connector =
8220 to_amdgpu_dm_connector(connector);
8223 /* empty probed_modes */
8224 INIT_LIST_HEAD(&connector->probed_modes);
8225 amdgpu_dm_connector->num_modes =
8226 drm_add_edid_modes(connector, edid);
8228 /* sorting the probed modes before calling function
8229 * amdgpu_dm_get_native_mode() since EDID can have
8230 * more than one preferred mode. The modes that are
8231 * later in the probed mode list could be of higher
8232 * and preferred resolution. For example, 3840x2160
8233 * resolution in base EDID preferred timing and 4096x2160
8234 * preferred resolution in DID extension block later.
8236 drm_mode_sort(&connector->probed_modes);
8237 amdgpu_dm_get_native_mode(connector);
8239 /* Freesync capabilities are reset by calling
8240 * drm_add_edid_modes() and need to be
8243 amdgpu_dm_update_freesync_caps(connector, edid);
8245 amdgpu_set_panel_orientation(connector);
8247 amdgpu_dm_connector->num_modes = 0;
8251 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8252 struct drm_display_mode *mode)
8254 struct drm_display_mode *m;
8256 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8257 if (drm_mode_equal(m, mode))
8264 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8266 const struct drm_display_mode *m;
8267 struct drm_display_mode *new_mode;
8269 uint32_t new_modes_count = 0;
8271 /* Standard FPS values
8280 * 60 - Commonly used
8281 * 48,72,96,120 - Multiples of 24
8283 static const uint32_t common_rates[] = {
8284 23976, 24000, 25000, 29970, 30000,
8285 48000, 50000, 60000, 72000, 96000, 120000
8289 * Find mode with highest refresh rate with the same resolution
8290 * as the preferred mode. Some monitors report a preferred mode
8291 * with lower resolution than the highest refresh rate supported.
8294 m = get_highest_refresh_rate_mode(aconnector, true);
8298 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8299 uint64_t target_vtotal, target_vtotal_diff;
8302 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8305 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8306 common_rates[i] > aconnector->max_vfreq * 1000)
8309 num = (unsigned long long)m->clock * 1000 * 1000;
8310 den = common_rates[i] * (unsigned long long)m->htotal;
8311 target_vtotal = div_u64(num, den);
8312 target_vtotal_diff = target_vtotal - m->vtotal;
8314 /* Check for illegal modes */
8315 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8316 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8317 m->vtotal + target_vtotal_diff < m->vsync_end)
8320 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8324 new_mode->vtotal += (u16)target_vtotal_diff;
8325 new_mode->vsync_start += (u16)target_vtotal_diff;
8326 new_mode->vsync_end += (u16)target_vtotal_diff;
8327 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8328 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8330 if (!is_duplicate_mode(aconnector, new_mode)) {
8331 drm_mode_probed_add(&aconnector->base, new_mode);
8332 new_modes_count += 1;
8334 drm_mode_destroy(aconnector->base.dev, new_mode);
8337 return new_modes_count;
8340 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8343 struct amdgpu_dm_connector *amdgpu_dm_connector =
8344 to_amdgpu_dm_connector(connector);
8349 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8350 amdgpu_dm_connector->num_modes +=
8351 add_fs_modes(amdgpu_dm_connector);
8354 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8356 struct amdgpu_dm_connector *amdgpu_dm_connector =
8357 to_amdgpu_dm_connector(connector);
8358 struct drm_encoder *encoder;
8359 struct edid *edid = amdgpu_dm_connector->edid;
8361 encoder = amdgpu_dm_connector_to_encoder(connector);
8363 if (!drm_edid_is_valid(edid)) {
8364 amdgpu_dm_connector->num_modes =
8365 drm_add_modes_noedid(connector, 640, 480);
8367 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8368 amdgpu_dm_connector_add_common_modes(encoder, connector);
8369 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8371 amdgpu_dm_fbc_init(connector);
8373 return amdgpu_dm_connector->num_modes;
8376 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8377 struct amdgpu_dm_connector *aconnector,
8379 struct dc_link *link,
8382 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8385 * Some of the properties below require access to state, like bpc.
8386 * Allocate some default initial connector state with our reset helper.
8388 if (aconnector->base.funcs->reset)
8389 aconnector->base.funcs->reset(&aconnector->base);
8391 aconnector->connector_id = link_index;
8392 aconnector->dc_link = link;
8393 aconnector->base.interlace_allowed = false;
8394 aconnector->base.doublescan_allowed = false;
8395 aconnector->base.stereo_allowed = false;
8396 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8397 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8398 aconnector->audio_inst = -1;
8399 mutex_init(&aconnector->hpd_lock);
8402 * configure support HPD hot plug connector_>polled default value is 0
8403 * which means HPD hot plug not supported
8405 switch (connector_type) {
8406 case DRM_MODE_CONNECTOR_HDMIA:
8407 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8408 aconnector->base.ycbcr_420_allowed =
8409 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8411 case DRM_MODE_CONNECTOR_DisplayPort:
8412 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8413 link->link_enc = link_enc_cfg_get_link_enc(link);
8414 ASSERT(link->link_enc);
8416 aconnector->base.ycbcr_420_allowed =
8417 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8419 case DRM_MODE_CONNECTOR_DVID:
8420 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8426 drm_object_attach_property(&aconnector->base.base,
8427 dm->ddev->mode_config.scaling_mode_property,
8428 DRM_MODE_SCALE_NONE);
8430 drm_object_attach_property(&aconnector->base.base,
8431 adev->mode_info.underscan_property,
8433 drm_object_attach_property(&aconnector->base.base,
8434 adev->mode_info.underscan_hborder_property,
8436 drm_object_attach_property(&aconnector->base.base,
8437 adev->mode_info.underscan_vborder_property,
8440 if (!aconnector->mst_port)
8441 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8443 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8444 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8445 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8447 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8448 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8449 drm_object_attach_property(&aconnector->base.base,
8450 adev->mode_info.abm_level_property, 0);
8453 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8454 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8455 connector_type == DRM_MODE_CONNECTOR_eDP) {
8456 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8458 if (!aconnector->mst_port)
8459 drm_connector_attach_vrr_capable_property(&aconnector->base);
8461 #ifdef CONFIG_DRM_AMD_DC_HDCP
8462 if (adev->dm.hdcp_workqueue)
8463 drm_connector_attach_content_protection_property(&aconnector->base, true);
8468 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8469 struct i2c_msg *msgs, int num)
8471 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8472 struct ddc_service *ddc_service = i2c->ddc_service;
8473 struct i2c_command cmd;
8477 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8482 cmd.number_of_payloads = num;
8483 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8486 for (i = 0; i < num; i++) {
8487 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8488 cmd.payloads[i].address = msgs[i].addr;
8489 cmd.payloads[i].length = msgs[i].len;
8490 cmd.payloads[i].data = msgs[i].buf;
8494 ddc_service->ctx->dc,
8495 ddc_service->ddc_pin->hw_info.ddc_channel,
8499 kfree(cmd.payloads);
8503 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8505 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8508 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8509 .master_xfer = amdgpu_dm_i2c_xfer,
8510 .functionality = amdgpu_dm_i2c_func,
8513 static struct amdgpu_i2c_adapter *
8514 create_i2c(struct ddc_service *ddc_service,
8518 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8519 struct amdgpu_i2c_adapter *i2c;
8521 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8524 i2c->base.owner = THIS_MODULE;
8525 i2c->base.class = I2C_CLASS_DDC;
8526 i2c->base.dev.parent = &adev->pdev->dev;
8527 i2c->base.algo = &amdgpu_dm_i2c_algo;
8528 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8529 i2c_set_adapdata(&i2c->base, i2c);
8530 i2c->ddc_service = ddc_service;
8531 if (i2c->ddc_service->ddc_pin)
8532 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8539 * Note: this function assumes that dc_link_detect() was called for the
8540 * dc_link which will be represented by this aconnector.
8542 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8543 struct amdgpu_dm_connector *aconnector,
8544 uint32_t link_index,
8545 struct amdgpu_encoder *aencoder)
8549 struct dc *dc = dm->dc;
8550 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8551 struct amdgpu_i2c_adapter *i2c;
8553 link->priv = aconnector;
8555 DRM_DEBUG_DRIVER("%s()\n", __func__);
8557 i2c = create_i2c(link->ddc, link->link_index, &res);
8559 DRM_ERROR("Failed to create i2c adapter data\n");
8563 aconnector->i2c = i2c;
8564 res = i2c_add_adapter(&i2c->base);
8567 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8571 connector_type = to_drm_connector_type(link->connector_signal);
8573 res = drm_connector_init_with_ddc(
8576 &amdgpu_dm_connector_funcs,
8581 DRM_ERROR("connector_init failed\n");
8582 aconnector->connector_id = -1;
8586 drm_connector_helper_add(
8588 &amdgpu_dm_connector_helper_funcs);
8590 amdgpu_dm_connector_init_helper(
8597 drm_connector_attach_encoder(
8598 &aconnector->base, &aencoder->base);
8600 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8601 || connector_type == DRM_MODE_CONNECTOR_eDP)
8602 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8607 aconnector->i2c = NULL;
8612 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8614 switch (adev->mode_info.num_crtc) {
8631 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8632 struct amdgpu_encoder *aencoder,
8633 uint32_t link_index)
8635 struct amdgpu_device *adev = drm_to_adev(dev);
8637 int res = drm_encoder_init(dev,
8639 &amdgpu_dm_encoder_funcs,
8640 DRM_MODE_ENCODER_TMDS,
8643 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8646 aencoder->encoder_id = link_index;
8648 aencoder->encoder_id = -1;
8650 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8655 static void manage_dm_interrupts(struct amdgpu_device *adev,
8656 struct amdgpu_crtc *acrtc,
8660 * We have no guarantee that the frontend index maps to the same
8661 * backend index - some even map to more than one.
8663 * TODO: Use a different interrupt or check DC itself for the mapping.
8666 amdgpu_display_crtc_idx_to_irq_type(
8671 drm_crtc_vblank_on(&acrtc->base);
8674 &adev->pageflip_irq,
8676 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8683 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8691 &adev->pageflip_irq,
8693 drm_crtc_vblank_off(&acrtc->base);
8697 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8698 struct amdgpu_crtc *acrtc)
8701 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8704 * This reads the current state for the IRQ and force reapplies
8705 * the setting to hardware.
8707 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8711 is_scaling_state_different(const struct dm_connector_state *dm_state,
8712 const struct dm_connector_state *old_dm_state)
8714 if (dm_state->scaling != old_dm_state->scaling)
8716 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8717 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8719 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8720 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8722 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8723 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8728 #ifdef CONFIG_DRM_AMD_DC_HDCP
8729 static bool is_content_protection_different(struct drm_connector_state *state,
8730 const struct drm_connector_state *old_state,
8731 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8733 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8734 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8736 /* Handle: Type0/1 change */
8737 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8738 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8739 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8743 /* CP is being re enabled, ignore this
8745 * Handles: ENABLED -> DESIRED
8747 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8748 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8749 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8753 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8755 * Handles: UNDESIRED -> ENABLED
8757 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8758 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8759 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8761 /* Stream removed and re-enabled
8763 * Can sometimes overlap with the HPD case,
8764 * thus set update_hdcp to false to avoid
8765 * setting HDCP multiple times.
8767 * Handles: DESIRED -> DESIRED (Special case)
8769 if (!(old_state->crtc && old_state->crtc->enabled) &&
8770 state->crtc && state->crtc->enabled &&
8771 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8772 dm_con_state->update_hdcp = false;
8776 /* Hot-plug, headless s3, dpms
8778 * Only start HDCP if the display is connected/enabled.
8779 * update_hdcp flag will be set to false until the next
8782 * Handles: DESIRED -> DESIRED (Special case)
8784 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8785 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8786 dm_con_state->update_hdcp = false;
8791 * Handles: UNDESIRED -> UNDESIRED
8792 * DESIRED -> DESIRED
8793 * ENABLED -> ENABLED
8795 if (old_state->content_protection == state->content_protection)
8799 * Handles: UNDESIRED -> DESIRED
8800 * DESIRED -> UNDESIRED
8801 * ENABLED -> UNDESIRED
8803 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8807 * Handles: DESIRED -> ENABLED
8813 static void remove_stream(struct amdgpu_device *adev,
8814 struct amdgpu_crtc *acrtc,
8815 struct dc_stream_state *stream)
8817 /* this is the update mode case */
8819 acrtc->otg_inst = -1;
8820 acrtc->enabled = false;
8823 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8824 struct dc_cursor_position *position)
8826 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8828 int xorigin = 0, yorigin = 0;
8830 if (!crtc || !plane->state->fb)
8833 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8834 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8835 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8837 plane->state->crtc_w,
8838 plane->state->crtc_h);
8842 x = plane->state->crtc_x;
8843 y = plane->state->crtc_y;
8845 if (x <= -amdgpu_crtc->max_cursor_width ||
8846 y <= -amdgpu_crtc->max_cursor_height)
8850 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8854 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8857 position->enable = true;
8858 position->translate_by_source = true;
8861 position->x_hotspot = xorigin;
8862 position->y_hotspot = yorigin;
8867 static void handle_cursor_update(struct drm_plane *plane,
8868 struct drm_plane_state *old_plane_state)
8870 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8871 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8872 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8873 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8874 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8875 uint64_t address = afb ? afb->address : 0;
8876 struct dc_cursor_position position = {0};
8877 struct dc_cursor_attributes attributes;
8880 if (!plane->state->fb && !old_plane_state->fb)
8883 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8885 amdgpu_crtc->crtc_id,
8886 plane->state->crtc_w,
8887 plane->state->crtc_h);
8889 ret = get_cursor_position(plane, crtc, &position);
8893 if (!position.enable) {
8894 /* turn off cursor */
8895 if (crtc_state && crtc_state->stream) {
8896 mutex_lock(&adev->dm.dc_lock);
8897 dc_stream_set_cursor_position(crtc_state->stream,
8899 mutex_unlock(&adev->dm.dc_lock);
8904 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8905 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8907 memset(&attributes, 0, sizeof(attributes));
8908 attributes.address.high_part = upper_32_bits(address);
8909 attributes.address.low_part = lower_32_bits(address);
8910 attributes.width = plane->state->crtc_w;
8911 attributes.height = plane->state->crtc_h;
8912 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8913 attributes.rotation_angle = 0;
8914 attributes.attribute_flags.value = 0;
8916 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8918 if (crtc_state->stream) {
8919 mutex_lock(&adev->dm.dc_lock);
8920 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8922 DRM_ERROR("DC failed to set cursor attributes\n");
8924 if (!dc_stream_set_cursor_position(crtc_state->stream,
8926 DRM_ERROR("DC failed to set cursor position\n");
8927 mutex_unlock(&adev->dm.dc_lock);
8931 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8934 assert_spin_locked(&acrtc->base.dev->event_lock);
8935 WARN_ON(acrtc->event);
8937 acrtc->event = acrtc->base.state->event;
8939 /* Set the flip status */
8940 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8942 /* Mark this event as consumed */
8943 acrtc->base.state->event = NULL;
8945 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8949 static void update_freesync_state_on_stream(
8950 struct amdgpu_display_manager *dm,
8951 struct dm_crtc_state *new_crtc_state,
8952 struct dc_stream_state *new_stream,
8953 struct dc_plane_state *surface,
8954 u32 flip_timestamp_in_us)
8956 struct mod_vrr_params vrr_params;
8957 struct dc_info_packet vrr_infopacket = {0};
8958 struct amdgpu_device *adev = dm->adev;
8959 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8960 unsigned long flags;
8961 bool pack_sdp_v1_3 = false;
8967 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8968 * For now it's sufficient to just guard against these conditions.
8971 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8974 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8975 vrr_params = acrtc->dm_irq_params.vrr_params;
8978 mod_freesync_handle_preflip(
8979 dm->freesync_module,
8982 flip_timestamp_in_us,
8985 if (adev->family < AMDGPU_FAMILY_AI &&
8986 amdgpu_dm_vrr_active(new_crtc_state)) {
8987 mod_freesync_handle_v_update(dm->freesync_module,
8988 new_stream, &vrr_params);
8990 /* Need to call this before the frame ends. */
8991 dc_stream_adjust_vmin_vmax(dm->dc,
8992 new_crtc_state->stream,
8993 &vrr_params.adjust);
8997 mod_freesync_build_vrr_infopacket(
8998 dm->freesync_module,
9002 TRANSFER_FUNC_UNKNOWN,
9006 new_crtc_state->freesync_timing_changed |=
9007 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9009 sizeof(vrr_params.adjust)) != 0);
9011 new_crtc_state->freesync_vrr_info_changed |=
9012 (memcmp(&new_crtc_state->vrr_infopacket,
9014 sizeof(vrr_infopacket)) != 0);
9016 acrtc->dm_irq_params.vrr_params = vrr_params;
9017 new_crtc_state->vrr_infopacket = vrr_infopacket;
9019 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9020 new_stream->vrr_infopacket = vrr_infopacket;
9022 if (new_crtc_state->freesync_vrr_info_changed)
9023 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9024 new_crtc_state->base.crtc->base.id,
9025 (int)new_crtc_state->base.vrr_enabled,
9026 (int)vrr_params.state);
9028 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9031 static void update_stream_irq_parameters(
9032 struct amdgpu_display_manager *dm,
9033 struct dm_crtc_state *new_crtc_state)
9035 struct dc_stream_state *new_stream = new_crtc_state->stream;
9036 struct mod_vrr_params vrr_params;
9037 struct mod_freesync_config config = new_crtc_state->freesync_config;
9038 struct amdgpu_device *adev = dm->adev;
9039 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9040 unsigned long flags;
9046 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9047 * For now it's sufficient to just guard against these conditions.
9049 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9052 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9053 vrr_params = acrtc->dm_irq_params.vrr_params;
9055 if (new_crtc_state->vrr_supported &&
9056 config.min_refresh_in_uhz &&
9057 config.max_refresh_in_uhz) {
9059 * if freesync compatible mode was set, config.state will be set
9062 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9063 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9064 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9065 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9066 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9067 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9068 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9070 config.state = new_crtc_state->base.vrr_enabled ?
9071 VRR_STATE_ACTIVE_VARIABLE :
9075 config.state = VRR_STATE_UNSUPPORTED;
9078 mod_freesync_build_vrr_params(dm->freesync_module,
9080 &config, &vrr_params);
9082 new_crtc_state->freesync_timing_changed |=
9083 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9084 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9086 new_crtc_state->freesync_config = config;
9087 /* Copy state for access from DM IRQ handler */
9088 acrtc->dm_irq_params.freesync_config = config;
9089 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9090 acrtc->dm_irq_params.vrr_params = vrr_params;
9091 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9094 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9095 struct dm_crtc_state *new_state)
9097 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9098 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9100 if (!old_vrr_active && new_vrr_active) {
9101 /* Transition VRR inactive -> active:
9102 * While VRR is active, we must not disable vblank irq, as a
9103 * reenable after disable would compute bogus vblank/pflip
9104 * timestamps if it likely happened inside display front-porch.
9106 * We also need vupdate irq for the actual core vblank handling
9109 dm_set_vupdate_irq(new_state->base.crtc, true);
9110 drm_crtc_vblank_get(new_state->base.crtc);
9111 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9112 __func__, new_state->base.crtc->base.id);
9113 } else if (old_vrr_active && !new_vrr_active) {
9114 /* Transition VRR active -> inactive:
9115 * Allow vblank irq disable again for fixed refresh rate.
9117 dm_set_vupdate_irq(new_state->base.crtc, false);
9118 drm_crtc_vblank_put(new_state->base.crtc);
9119 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9120 __func__, new_state->base.crtc->base.id);
9124 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9126 struct drm_plane *plane;
9127 struct drm_plane_state *old_plane_state;
9131 * TODO: Make this per-stream so we don't issue redundant updates for
9132 * commits with multiple streams.
9134 for_each_old_plane_in_state(state, plane, old_plane_state, i)
9135 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9136 handle_cursor_update(plane, old_plane_state);
9139 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9140 struct dc_state *dc_state,
9141 struct drm_device *dev,
9142 struct amdgpu_display_manager *dm,
9143 struct drm_crtc *pcrtc,
9144 bool wait_for_vblank)
9147 uint64_t timestamp_ns;
9148 struct drm_plane *plane;
9149 struct drm_plane_state *old_plane_state, *new_plane_state;
9150 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9151 struct drm_crtc_state *new_pcrtc_state =
9152 drm_atomic_get_new_crtc_state(state, pcrtc);
9153 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9154 struct dm_crtc_state *dm_old_crtc_state =
9155 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9156 int planes_count = 0, vpos, hpos;
9158 unsigned long flags;
9159 struct amdgpu_bo *abo;
9160 uint32_t target_vblank, last_flip_vblank;
9161 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9162 bool pflip_present = false;
9164 struct dc_surface_update surface_updates[MAX_SURFACES];
9165 struct dc_plane_info plane_infos[MAX_SURFACES];
9166 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9167 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9168 struct dc_stream_update stream_update;
9171 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9174 dm_error("Failed to allocate update bundle\n");
9179 * Disable the cursor first if we're disabling all the planes.
9180 * It'll remain on the screen after the planes are re-enabled
9183 if (acrtc_state->active_planes == 0)
9184 amdgpu_dm_commit_cursors(state);
9186 /* update planes when needed */
9187 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9188 struct drm_crtc *crtc = new_plane_state->crtc;
9189 struct drm_crtc_state *new_crtc_state;
9190 struct drm_framebuffer *fb = new_plane_state->fb;
9191 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9192 bool plane_needs_flip;
9193 struct dc_plane_state *dc_plane;
9194 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9196 /* Cursor plane is handled after stream updates */
9197 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9200 if (!fb || !crtc || pcrtc != crtc)
9203 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9204 if (!new_crtc_state->active)
9207 dc_plane = dm_new_plane_state->dc_state;
9209 bundle->surface_updates[planes_count].surface = dc_plane;
9210 if (new_pcrtc_state->color_mgmt_changed) {
9211 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9212 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9213 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9216 fill_dc_scaling_info(dm->adev, new_plane_state,
9217 &bundle->scaling_infos[planes_count]);
9219 bundle->surface_updates[planes_count].scaling_info =
9220 &bundle->scaling_infos[planes_count];
9222 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9224 pflip_present = pflip_present || plane_needs_flip;
9226 if (!plane_needs_flip) {
9231 abo = gem_to_amdgpu_bo(fb->obj[0]);
9234 * Wait for all fences on this FB. Do limited wait to avoid
9235 * deadlock during GPU reset when this fence will not signal
9236 * but we hold reservation lock for the BO.
9238 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9239 msecs_to_jiffies(5000));
9240 if (unlikely(r <= 0))
9241 DRM_ERROR("Waiting for fences timed out!");
9243 fill_dc_plane_info_and_addr(
9244 dm->adev, new_plane_state,
9246 &bundle->plane_infos[planes_count],
9247 &bundle->flip_addrs[planes_count].address,
9248 afb->tmz_surface, false);
9250 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9251 new_plane_state->plane->index,
9252 bundle->plane_infos[planes_count].dcc.enable);
9254 bundle->surface_updates[planes_count].plane_info =
9255 &bundle->plane_infos[planes_count];
9258 * Only allow immediate flips for fast updates that don't
9259 * change FB pitch, DCC state, rotation or mirroing.
9261 bundle->flip_addrs[planes_count].flip_immediate =
9262 crtc->state->async_flip &&
9263 acrtc_state->update_type == UPDATE_TYPE_FAST;
9265 timestamp_ns = ktime_get_ns();
9266 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9267 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9268 bundle->surface_updates[planes_count].surface = dc_plane;
9270 if (!bundle->surface_updates[planes_count].surface) {
9271 DRM_ERROR("No surface for CRTC: id=%d\n",
9272 acrtc_attach->crtc_id);
9276 if (plane == pcrtc->primary)
9277 update_freesync_state_on_stream(
9280 acrtc_state->stream,
9282 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9284 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9286 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9287 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9293 if (pflip_present) {
9295 /* Use old throttling in non-vrr fixed refresh rate mode
9296 * to keep flip scheduling based on target vblank counts
9297 * working in a backwards compatible way, e.g., for
9298 * clients using the GLX_OML_sync_control extension or
9299 * DRI3/Present extension with defined target_msc.
9301 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9304 /* For variable refresh rate mode only:
9305 * Get vblank of last completed flip to avoid > 1 vrr
9306 * flips per video frame by use of throttling, but allow
9307 * flip programming anywhere in the possibly large
9308 * variable vrr vblank interval for fine-grained flip
9309 * timing control and more opportunity to avoid stutter
9310 * on late submission of flips.
9312 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9313 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9314 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9317 target_vblank = last_flip_vblank + wait_for_vblank;
9320 * Wait until we're out of the vertical blank period before the one
9321 * targeted by the flip
9323 while ((acrtc_attach->enabled &&
9324 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9325 0, &vpos, &hpos, NULL,
9326 NULL, &pcrtc->hwmode)
9327 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9328 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9329 (int)(target_vblank -
9330 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9331 usleep_range(1000, 1100);
9335 * Prepare the flip event for the pageflip interrupt to handle.
9337 * This only works in the case where we've already turned on the
9338 * appropriate hardware blocks (eg. HUBP) so in the transition case
9339 * from 0 -> n planes we have to skip a hardware generated event
9340 * and rely on sending it from software.
9342 if (acrtc_attach->base.state->event &&
9343 acrtc_state->active_planes > 0 &&
9344 !acrtc_state->force_dpms_off) {
9345 drm_crtc_vblank_get(pcrtc);
9347 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9349 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9350 prepare_flip_isr(acrtc_attach);
9352 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9355 if (acrtc_state->stream) {
9356 if (acrtc_state->freesync_vrr_info_changed)
9357 bundle->stream_update.vrr_infopacket =
9358 &acrtc_state->stream->vrr_infopacket;
9362 /* Update the planes if changed or disable if we don't have any. */
9363 if ((planes_count || acrtc_state->active_planes == 0) &&
9364 acrtc_state->stream) {
9365 #if defined(CONFIG_DRM_AMD_DC_DCN)
9367 * If PSR or idle optimizations are enabled then flush out
9368 * any pending work before hardware programming.
9370 if (dm->vblank_control_workqueue)
9371 flush_workqueue(dm->vblank_control_workqueue);
9374 bundle->stream_update.stream = acrtc_state->stream;
9375 if (new_pcrtc_state->mode_changed) {
9376 bundle->stream_update.src = acrtc_state->stream->src;
9377 bundle->stream_update.dst = acrtc_state->stream->dst;
9380 if (new_pcrtc_state->color_mgmt_changed) {
9382 * TODO: This isn't fully correct since we've actually
9383 * already modified the stream in place.
9385 bundle->stream_update.gamut_remap =
9386 &acrtc_state->stream->gamut_remap_matrix;
9387 bundle->stream_update.output_csc_transform =
9388 &acrtc_state->stream->csc_color_matrix;
9389 bundle->stream_update.out_transfer_func =
9390 acrtc_state->stream->out_transfer_func;
9393 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9394 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9395 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9398 * If FreeSync state on the stream has changed then we need to
9399 * re-adjust the min/max bounds now that DC doesn't handle this
9400 * as part of commit.
9402 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9403 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9404 dc_stream_adjust_vmin_vmax(
9405 dm->dc, acrtc_state->stream,
9406 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9407 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9409 mutex_lock(&dm->dc_lock);
9410 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9411 acrtc_state->stream->link->psr_settings.psr_allow_active)
9412 amdgpu_dm_psr_disable(acrtc_state->stream);
9414 dc_commit_updates_for_stream(dm->dc,
9415 bundle->surface_updates,
9417 acrtc_state->stream,
9418 &bundle->stream_update,
9422 * Enable or disable the interrupts on the backend.
9424 * Most pipes are put into power gating when unused.
9426 * When power gating is enabled on a pipe we lose the
9427 * interrupt enablement state when power gating is disabled.
9429 * So we need to update the IRQ control state in hardware
9430 * whenever the pipe turns on (since it could be previously
9431 * power gated) or off (since some pipes can't be power gated
9434 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9435 dm_update_pflip_irq_state(drm_to_adev(dev),
9438 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9439 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9440 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9441 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9443 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9444 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9445 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9446 struct amdgpu_dm_connector *aconn =
9447 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9449 if (aconn->psr_skip_count > 0)
9450 aconn->psr_skip_count--;
9452 /* Allow PSR when skip count is 0. */
9453 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9455 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9458 mutex_unlock(&dm->dc_lock);
9462 * Update cursor state *after* programming all the planes.
9463 * This avoids redundant programming in the case where we're going
9464 * to be disabling a single plane - those pipes are being disabled.
9466 if (acrtc_state->active_planes)
9467 amdgpu_dm_commit_cursors(state);
9473 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9474 struct drm_atomic_state *state)
9476 struct amdgpu_device *adev = drm_to_adev(dev);
9477 struct amdgpu_dm_connector *aconnector;
9478 struct drm_connector *connector;
9479 struct drm_connector_state *old_con_state, *new_con_state;
9480 struct drm_crtc_state *new_crtc_state;
9481 struct dm_crtc_state *new_dm_crtc_state;
9482 const struct dc_stream_status *status;
9485 /* Notify device removals. */
9486 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9487 if (old_con_state->crtc != new_con_state->crtc) {
9488 /* CRTC changes require notification. */
9492 if (!new_con_state->crtc)
9495 new_crtc_state = drm_atomic_get_new_crtc_state(
9496 state, new_con_state->crtc);
9498 if (!new_crtc_state)
9501 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9505 aconnector = to_amdgpu_dm_connector(connector);
9507 mutex_lock(&adev->dm.audio_lock);
9508 inst = aconnector->audio_inst;
9509 aconnector->audio_inst = -1;
9510 mutex_unlock(&adev->dm.audio_lock);
9512 amdgpu_dm_audio_eld_notify(adev, inst);
9515 /* Notify audio device additions. */
9516 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9517 if (!new_con_state->crtc)
9520 new_crtc_state = drm_atomic_get_new_crtc_state(
9521 state, new_con_state->crtc);
9523 if (!new_crtc_state)
9526 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9529 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9530 if (!new_dm_crtc_state->stream)
9533 status = dc_stream_get_status(new_dm_crtc_state->stream);
9537 aconnector = to_amdgpu_dm_connector(connector);
9539 mutex_lock(&adev->dm.audio_lock);
9540 inst = status->audio_inst;
9541 aconnector->audio_inst = inst;
9542 mutex_unlock(&adev->dm.audio_lock);
9544 amdgpu_dm_audio_eld_notify(adev, inst);
9549 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9550 * @crtc_state: the DRM CRTC state
9551 * @stream_state: the DC stream state.
9553 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9554 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9556 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9557 struct dc_stream_state *stream_state)
9559 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9563 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9564 * @state: The atomic state to commit
9566 * This will tell DC to commit the constructed DC state from atomic_check,
9567 * programming the hardware. Any failures here implies a hardware failure, since
9568 * atomic check should have filtered anything non-kosher.
9570 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9572 struct drm_device *dev = state->dev;
9573 struct amdgpu_device *adev = drm_to_adev(dev);
9574 struct amdgpu_display_manager *dm = &adev->dm;
9575 struct dm_atomic_state *dm_state;
9576 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9578 struct drm_crtc *crtc;
9579 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9580 unsigned long flags;
9581 bool wait_for_vblank = true;
9582 struct drm_connector *connector;
9583 struct drm_connector_state *old_con_state, *new_con_state;
9584 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9585 int crtc_disable_count = 0;
9586 bool mode_set_reset_required = false;
9588 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9590 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9592 dm_state = dm_atomic_get_new_state(state);
9593 if (dm_state && dm_state->context) {
9594 dc_state = dm_state->context;
9596 /* No state changes, retain current state. */
9597 dc_state_temp = dc_create_state(dm->dc);
9598 ASSERT(dc_state_temp);
9599 dc_state = dc_state_temp;
9600 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9603 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9604 new_crtc_state, i) {
9605 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9607 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9609 if (old_crtc_state->active &&
9610 (!new_crtc_state->active ||
9611 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9612 manage_dm_interrupts(adev, acrtc, false);
9613 dc_stream_release(dm_old_crtc_state->stream);
9617 drm_atomic_helper_calc_timestamping_constants(state);
9619 /* update changed items */
9620 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9621 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9623 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9624 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9627 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9628 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9629 "connectors_changed:%d\n",
9631 new_crtc_state->enable,
9632 new_crtc_state->active,
9633 new_crtc_state->planes_changed,
9634 new_crtc_state->mode_changed,
9635 new_crtc_state->active_changed,
9636 new_crtc_state->connectors_changed);
9638 /* Disable cursor if disabling crtc */
9639 if (old_crtc_state->active && !new_crtc_state->active) {
9640 struct dc_cursor_position position;
9642 memset(&position, 0, sizeof(position));
9643 mutex_lock(&dm->dc_lock);
9644 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9645 mutex_unlock(&dm->dc_lock);
9648 /* Copy all transient state flags into dc state */
9649 if (dm_new_crtc_state->stream) {
9650 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9651 dm_new_crtc_state->stream);
9654 /* handles headless hotplug case, updating new_state and
9655 * aconnector as needed
9658 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9660 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9662 if (!dm_new_crtc_state->stream) {
9664 * this could happen because of issues with
9665 * userspace notifications delivery.
9666 * In this case userspace tries to set mode on
9667 * display which is disconnected in fact.
9668 * dc_sink is NULL in this case on aconnector.
9669 * We expect reset mode will come soon.
9671 * This can also happen when unplug is done
9672 * during resume sequence ended
9674 * In this case, we want to pretend we still
9675 * have a sink to keep the pipe running so that
9676 * hw state is consistent with the sw state
9678 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9679 __func__, acrtc->base.base.id);
9683 if (dm_old_crtc_state->stream)
9684 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9686 pm_runtime_get_noresume(dev->dev);
9688 acrtc->enabled = true;
9689 acrtc->hw_mode = new_crtc_state->mode;
9690 crtc->hwmode = new_crtc_state->mode;
9691 mode_set_reset_required = true;
9692 } else if (modereset_required(new_crtc_state)) {
9693 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9694 /* i.e. reset mode */
9695 if (dm_old_crtc_state->stream)
9696 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9698 mode_set_reset_required = true;
9700 } /* for_each_crtc_in_state() */
9703 /* if there mode set or reset, disable eDP PSR */
9704 if (mode_set_reset_required) {
9705 #if defined(CONFIG_DRM_AMD_DC_DCN)
9706 if (dm->vblank_control_workqueue)
9707 flush_workqueue(dm->vblank_control_workqueue);
9709 amdgpu_dm_psr_disable_all(dm);
9712 dm_enable_per_frame_crtc_master_sync(dc_state);
9713 mutex_lock(&dm->dc_lock);
9714 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9715 #if defined(CONFIG_DRM_AMD_DC_DCN)
9716 /* Allow idle optimization when vblank count is 0 for display off */
9717 if (dm->active_vblank_irq_count == 0)
9718 dc_allow_idle_optimizations(dm->dc,true);
9720 mutex_unlock(&dm->dc_lock);
9723 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9724 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9726 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9728 if (dm_new_crtc_state->stream != NULL) {
9729 const struct dc_stream_status *status =
9730 dc_stream_get_status(dm_new_crtc_state->stream);
9733 status = dc_stream_get_status_from_state(dc_state,
9734 dm_new_crtc_state->stream);
9736 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9738 acrtc->otg_inst = status->primary_otg_inst;
9741 #ifdef CONFIG_DRM_AMD_DC_HDCP
9742 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9743 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9744 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9745 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9747 new_crtc_state = NULL;
9750 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9752 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9754 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9755 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9756 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9757 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9758 dm_new_con_state->update_hdcp = true;
9762 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9763 hdcp_update_display(
9764 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9765 new_con_state->hdcp_content_type,
9766 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9770 /* Handle connector state changes */
9771 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9772 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9773 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9774 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9775 struct dc_surface_update dummy_updates[MAX_SURFACES];
9776 struct dc_stream_update stream_update;
9777 struct dc_info_packet hdr_packet;
9778 struct dc_stream_status *status = NULL;
9779 bool abm_changed, hdr_changed, scaling_changed;
9781 memset(&dummy_updates, 0, sizeof(dummy_updates));
9782 memset(&stream_update, 0, sizeof(stream_update));
9785 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9786 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9789 /* Skip any modesets/resets */
9790 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9793 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9794 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9796 scaling_changed = is_scaling_state_different(dm_new_con_state,
9799 abm_changed = dm_new_crtc_state->abm_level !=
9800 dm_old_crtc_state->abm_level;
9803 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9805 if (!scaling_changed && !abm_changed && !hdr_changed)
9808 stream_update.stream = dm_new_crtc_state->stream;
9809 if (scaling_changed) {
9810 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9811 dm_new_con_state, dm_new_crtc_state->stream);
9813 stream_update.src = dm_new_crtc_state->stream->src;
9814 stream_update.dst = dm_new_crtc_state->stream->dst;
9818 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9820 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9824 fill_hdr_info_packet(new_con_state, &hdr_packet);
9825 stream_update.hdr_static_metadata = &hdr_packet;
9828 status = dc_stream_get_status(dm_new_crtc_state->stream);
9830 if (WARN_ON(!status))
9833 WARN_ON(!status->plane_count);
9836 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9837 * Here we create an empty update on each plane.
9838 * To fix this, DC should permit updating only stream properties.
9840 for (j = 0; j < status->plane_count; j++)
9841 dummy_updates[j].surface = status->plane_states[0];
9844 mutex_lock(&dm->dc_lock);
9845 dc_commit_updates_for_stream(dm->dc,
9847 status->plane_count,
9848 dm_new_crtc_state->stream,
9851 mutex_unlock(&dm->dc_lock);
9854 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9855 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9856 new_crtc_state, i) {
9857 if (old_crtc_state->active && !new_crtc_state->active)
9858 crtc_disable_count++;
9860 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9861 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9863 /* For freesync config update on crtc state and params for irq */
9864 update_stream_irq_parameters(dm, dm_new_crtc_state);
9866 /* Handle vrr on->off / off->on transitions */
9867 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9872 * Enable interrupts for CRTCs that are newly enabled or went through
9873 * a modeset. It was intentionally deferred until after the front end
9874 * state was modified to wait until the OTG was on and so the IRQ
9875 * handlers didn't access stale or invalid state.
9877 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9878 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9879 #ifdef CONFIG_DEBUG_FS
9880 bool configure_crc = false;
9881 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9882 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9883 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9885 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9886 cur_crc_src = acrtc->dm_irq_params.crc_src;
9887 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9889 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9891 if (new_crtc_state->active &&
9892 (!old_crtc_state->active ||
9893 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9894 dc_stream_retain(dm_new_crtc_state->stream);
9895 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9896 manage_dm_interrupts(adev, acrtc, true);
9898 #ifdef CONFIG_DEBUG_FS
9900 * Frontend may have changed so reapply the CRC capture
9901 * settings for the stream.
9903 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9905 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9906 configure_crc = true;
9907 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9908 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9909 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9910 acrtc->dm_irq_params.crc_window.update_win = true;
9911 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9912 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9913 crc_rd_wrk->crtc = crtc;
9914 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9915 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9921 if (amdgpu_dm_crtc_configure_crc_source(
9922 crtc, dm_new_crtc_state, cur_crc_src))
9923 DRM_DEBUG_DRIVER("Failed to configure crc source");
9928 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9929 if (new_crtc_state->async_flip)
9930 wait_for_vblank = false;
9932 /* update planes when needed per crtc*/
9933 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9934 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9936 if (dm_new_crtc_state->stream)
9937 amdgpu_dm_commit_planes(state, dc_state, dev,
9938 dm, crtc, wait_for_vblank);
9941 /* Update audio instances for each connector. */
9942 amdgpu_dm_commit_audio(dev, state);
9944 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9945 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9946 /* restore the backlight level */
9947 for (i = 0; i < dm->num_of_edps; i++) {
9948 if (dm->backlight_dev[i] &&
9949 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9950 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9954 * send vblank event on all events not handled in flip and
9955 * mark consumed event for drm_atomic_helper_commit_hw_done
9957 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9958 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9960 if (new_crtc_state->event)
9961 drm_send_event_locked(dev, &new_crtc_state->event->base);
9963 new_crtc_state->event = NULL;
9965 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9967 /* Signal HW programming completion */
9968 drm_atomic_helper_commit_hw_done(state);
9970 if (wait_for_vblank)
9971 drm_atomic_helper_wait_for_flip_done(dev, state);
9973 drm_atomic_helper_cleanup_planes(dev, state);
9975 /* return the stolen vga memory back to VRAM */
9976 if (!adev->mman.keep_stolen_vga_memory)
9977 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9978 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9981 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9982 * so we can put the GPU into runtime suspend if we're not driving any
9985 for (i = 0; i < crtc_disable_count; i++)
9986 pm_runtime_put_autosuspend(dev->dev);
9987 pm_runtime_mark_last_busy(dev->dev);
9990 dc_release_state(dc_state_temp);
9994 static int dm_force_atomic_commit(struct drm_connector *connector)
9997 struct drm_device *ddev = connector->dev;
9998 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9999 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10000 struct drm_plane *plane = disconnected_acrtc->base.primary;
10001 struct drm_connector_state *conn_state;
10002 struct drm_crtc_state *crtc_state;
10003 struct drm_plane_state *plane_state;
10008 state->acquire_ctx = ddev->mode_config.acquire_ctx;
10010 /* Construct an atomic state to restore previous display setting */
10013 * Attach connectors to drm_atomic_state
10015 conn_state = drm_atomic_get_connector_state(state, connector);
10017 ret = PTR_ERR_OR_ZERO(conn_state);
10021 /* Attach crtc to drm_atomic_state*/
10022 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10024 ret = PTR_ERR_OR_ZERO(crtc_state);
10028 /* force a restore */
10029 crtc_state->mode_changed = true;
10031 /* Attach plane to drm_atomic_state */
10032 plane_state = drm_atomic_get_plane_state(state, plane);
10034 ret = PTR_ERR_OR_ZERO(plane_state);
10038 /* Call commit internally with the state we just constructed */
10039 ret = drm_atomic_commit(state);
10042 drm_atomic_state_put(state);
10044 DRM_ERROR("Restoring old state failed with %i\n", ret);
10050 * This function handles all cases when set mode does not come upon hotplug.
10051 * This includes when a display is unplugged then plugged back into the
10052 * same port and when running without usermode desktop manager supprot
10054 void dm_restore_drm_connector_state(struct drm_device *dev,
10055 struct drm_connector *connector)
10057 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10058 struct amdgpu_crtc *disconnected_acrtc;
10059 struct dm_crtc_state *acrtc_state;
10061 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10064 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10065 if (!disconnected_acrtc)
10068 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10069 if (!acrtc_state->stream)
10073 * If the previous sink is not released and different from the current,
10074 * we deduce we are in a state where we can not rely on usermode call
10075 * to turn on the display, so we do it here
10077 if (acrtc_state->stream->sink != aconnector->dc_sink)
10078 dm_force_atomic_commit(&aconnector->base);
10082 * Grabs all modesetting locks to serialize against any blocking commits,
10083 * Waits for completion of all non blocking commits.
10085 static int do_aquire_global_lock(struct drm_device *dev,
10086 struct drm_atomic_state *state)
10088 struct drm_crtc *crtc;
10089 struct drm_crtc_commit *commit;
10093 * Adding all modeset locks to aquire_ctx will
10094 * ensure that when the framework release it the
10095 * extra locks we are locking here will get released to
10097 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10101 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10102 spin_lock(&crtc->commit_lock);
10103 commit = list_first_entry_or_null(&crtc->commit_list,
10104 struct drm_crtc_commit, commit_entry);
10106 drm_crtc_commit_get(commit);
10107 spin_unlock(&crtc->commit_lock);
10113 * Make sure all pending HW programming completed and
10116 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10119 ret = wait_for_completion_interruptible_timeout(
10120 &commit->flip_done, 10*HZ);
10123 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10124 "timed out\n", crtc->base.id, crtc->name);
10126 drm_crtc_commit_put(commit);
10129 return ret < 0 ? ret : 0;
10132 static void get_freesync_config_for_crtc(
10133 struct dm_crtc_state *new_crtc_state,
10134 struct dm_connector_state *new_con_state)
10136 struct mod_freesync_config config = {0};
10137 struct amdgpu_dm_connector *aconnector =
10138 to_amdgpu_dm_connector(new_con_state->base.connector);
10139 struct drm_display_mode *mode = &new_crtc_state->base.mode;
10140 int vrefresh = drm_mode_vrefresh(mode);
10141 bool fs_vid_mode = false;
10143 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10144 vrefresh >= aconnector->min_vfreq &&
10145 vrefresh <= aconnector->max_vfreq;
10147 if (new_crtc_state->vrr_supported) {
10148 new_crtc_state->stream->ignore_msa_timing_param = true;
10149 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10151 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10152 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10153 config.vsif_supported = true;
10157 config.state = VRR_STATE_ACTIVE_FIXED;
10158 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10160 } else if (new_crtc_state->base.vrr_enabled) {
10161 config.state = VRR_STATE_ACTIVE_VARIABLE;
10163 config.state = VRR_STATE_INACTIVE;
10167 new_crtc_state->freesync_config = config;
10170 static void reset_freesync_config_for_crtc(
10171 struct dm_crtc_state *new_crtc_state)
10173 new_crtc_state->vrr_supported = false;
10175 memset(&new_crtc_state->vrr_infopacket, 0,
10176 sizeof(new_crtc_state->vrr_infopacket));
10180 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10181 struct drm_crtc_state *new_crtc_state)
10183 struct drm_display_mode old_mode, new_mode;
10185 if (!old_crtc_state || !new_crtc_state)
10188 old_mode = old_crtc_state->mode;
10189 new_mode = new_crtc_state->mode;
10191 if (old_mode.clock == new_mode.clock &&
10192 old_mode.hdisplay == new_mode.hdisplay &&
10193 old_mode.vdisplay == new_mode.vdisplay &&
10194 old_mode.htotal == new_mode.htotal &&
10195 old_mode.vtotal != new_mode.vtotal &&
10196 old_mode.hsync_start == new_mode.hsync_start &&
10197 old_mode.vsync_start != new_mode.vsync_start &&
10198 old_mode.hsync_end == new_mode.hsync_end &&
10199 old_mode.vsync_end != new_mode.vsync_end &&
10200 old_mode.hskew == new_mode.hskew &&
10201 old_mode.vscan == new_mode.vscan &&
10202 (old_mode.vsync_end - old_mode.vsync_start) ==
10203 (new_mode.vsync_end - new_mode.vsync_start))
10209 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10210 uint64_t num, den, res;
10211 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10213 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10215 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10216 den = (unsigned long long)new_crtc_state->mode.htotal *
10217 (unsigned long long)new_crtc_state->mode.vtotal;
10219 res = div_u64(num, den);
10220 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10223 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10224 struct drm_atomic_state *state,
10225 struct drm_crtc *crtc,
10226 struct drm_crtc_state *old_crtc_state,
10227 struct drm_crtc_state *new_crtc_state,
10229 bool *lock_and_validation_needed)
10231 struct dm_atomic_state *dm_state = NULL;
10232 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10233 struct dc_stream_state *new_stream;
10237 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10238 * update changed items
10240 struct amdgpu_crtc *acrtc = NULL;
10241 struct amdgpu_dm_connector *aconnector = NULL;
10242 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10243 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10247 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10248 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10249 acrtc = to_amdgpu_crtc(crtc);
10250 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10252 /* TODO This hack should go away */
10253 if (aconnector && enable) {
10254 /* Make sure fake sink is created in plug-in scenario */
10255 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10256 &aconnector->base);
10257 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10258 &aconnector->base);
10260 if (IS_ERR(drm_new_conn_state)) {
10261 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10265 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10266 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10268 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10271 new_stream = create_validate_stream_for_sink(aconnector,
10272 &new_crtc_state->mode,
10274 dm_old_crtc_state->stream);
10277 * we can have no stream on ACTION_SET if a display
10278 * was disconnected during S3, in this case it is not an
10279 * error, the OS will be updated after detection, and
10280 * will do the right thing on next atomic commit
10284 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10285 __func__, acrtc->base.base.id);
10291 * TODO: Check VSDB bits to decide whether this should
10292 * be enabled or not.
10294 new_stream->triggered_crtc_reset.enabled =
10295 dm->force_timing_sync;
10297 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10299 ret = fill_hdr_info_packet(drm_new_conn_state,
10300 &new_stream->hdr_static_metadata);
10305 * If we already removed the old stream from the context
10306 * (and set the new stream to NULL) then we can't reuse
10307 * the old stream even if the stream and scaling are unchanged.
10308 * We'll hit the BUG_ON and black screen.
10310 * TODO: Refactor this function to allow this check to work
10311 * in all conditions.
10313 if (dm_new_crtc_state->stream &&
10314 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10317 if (dm_new_crtc_state->stream &&
10318 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10319 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10320 new_crtc_state->mode_changed = false;
10321 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10322 new_crtc_state->mode_changed);
10326 /* mode_changed flag may get updated above, need to check again */
10327 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10331 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10332 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10333 "connectors_changed:%d\n",
10335 new_crtc_state->enable,
10336 new_crtc_state->active,
10337 new_crtc_state->planes_changed,
10338 new_crtc_state->mode_changed,
10339 new_crtc_state->active_changed,
10340 new_crtc_state->connectors_changed);
10342 /* Remove stream for any changed/disabled CRTC */
10345 if (!dm_old_crtc_state->stream)
10348 if (dm_new_crtc_state->stream &&
10349 is_timing_unchanged_for_freesync(new_crtc_state,
10351 new_crtc_state->mode_changed = false;
10353 "Mode change not required for front porch change, "
10354 "setting mode_changed to %d",
10355 new_crtc_state->mode_changed);
10357 set_freesync_fixed_config(dm_new_crtc_state);
10360 } else if (aconnector &&
10361 is_freesync_video_mode(&new_crtc_state->mode,
10363 struct drm_display_mode *high_mode;
10365 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10366 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10367 set_freesync_fixed_config(dm_new_crtc_state);
10371 ret = dm_atomic_get_state(state, &dm_state);
10375 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10378 /* i.e. reset mode */
10379 if (dc_remove_stream_from_ctx(
10382 dm_old_crtc_state->stream) != DC_OK) {
10387 dc_stream_release(dm_old_crtc_state->stream);
10388 dm_new_crtc_state->stream = NULL;
10390 reset_freesync_config_for_crtc(dm_new_crtc_state);
10392 *lock_and_validation_needed = true;
10394 } else {/* Add stream for any updated/enabled CRTC */
10396 * Quick fix to prevent NULL pointer on new_stream when
10397 * added MST connectors not found in existing crtc_state in the chained mode
10398 * TODO: need to dig out the root cause of that
10400 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10403 if (modereset_required(new_crtc_state))
10406 if (modeset_required(new_crtc_state, new_stream,
10407 dm_old_crtc_state->stream)) {
10409 WARN_ON(dm_new_crtc_state->stream);
10411 ret = dm_atomic_get_state(state, &dm_state);
10415 dm_new_crtc_state->stream = new_stream;
10417 dc_stream_retain(new_stream);
10419 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10422 if (dc_add_stream_to_ctx(
10425 dm_new_crtc_state->stream) != DC_OK) {
10430 *lock_and_validation_needed = true;
10435 /* Release extra reference */
10437 dc_stream_release(new_stream);
10440 * We want to do dc stream updates that do not require a
10441 * full modeset below.
10443 if (!(enable && aconnector && new_crtc_state->active))
10446 * Given above conditions, the dc state cannot be NULL because:
10447 * 1. We're in the process of enabling CRTCs (just been added
10448 * to the dc context, or already is on the context)
10449 * 2. Has a valid connector attached, and
10450 * 3. Is currently active and enabled.
10451 * => The dc stream state currently exists.
10453 BUG_ON(dm_new_crtc_state->stream == NULL);
10455 /* Scaling or underscan settings */
10456 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10457 drm_atomic_crtc_needs_modeset(new_crtc_state))
10458 update_stream_scaling_settings(
10459 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10462 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10465 * Color management settings. We also update color properties
10466 * when a modeset is needed, to ensure it gets reprogrammed.
10468 if (dm_new_crtc_state->base.color_mgmt_changed ||
10469 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10470 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10475 /* Update Freesync settings. */
10476 get_freesync_config_for_crtc(dm_new_crtc_state,
10477 dm_new_conn_state);
10483 dc_stream_release(new_stream);
10487 static bool should_reset_plane(struct drm_atomic_state *state,
10488 struct drm_plane *plane,
10489 struct drm_plane_state *old_plane_state,
10490 struct drm_plane_state *new_plane_state)
10492 struct drm_plane *other;
10493 struct drm_plane_state *old_other_state, *new_other_state;
10494 struct drm_crtc_state *new_crtc_state;
10498 * TODO: Remove this hack once the checks below are sufficient
10499 * enough to determine when we need to reset all the planes on
10502 if (state->allow_modeset)
10505 /* Exit early if we know that we're adding or removing the plane. */
10506 if (old_plane_state->crtc != new_plane_state->crtc)
10509 /* old crtc == new_crtc == NULL, plane not in context. */
10510 if (!new_plane_state->crtc)
10514 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10516 if (!new_crtc_state)
10519 /* CRTC Degamma changes currently require us to recreate planes. */
10520 if (new_crtc_state->color_mgmt_changed)
10523 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10527 * If there are any new primary or overlay planes being added or
10528 * removed then the z-order can potentially change. To ensure
10529 * correct z-order and pipe acquisition the current DC architecture
10530 * requires us to remove and recreate all existing planes.
10532 * TODO: Come up with a more elegant solution for this.
10534 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10535 struct amdgpu_framebuffer *old_afb, *new_afb;
10536 if (other->type == DRM_PLANE_TYPE_CURSOR)
10539 if (old_other_state->crtc != new_plane_state->crtc &&
10540 new_other_state->crtc != new_plane_state->crtc)
10543 if (old_other_state->crtc != new_other_state->crtc)
10546 /* Src/dst size and scaling updates. */
10547 if (old_other_state->src_w != new_other_state->src_w ||
10548 old_other_state->src_h != new_other_state->src_h ||
10549 old_other_state->crtc_w != new_other_state->crtc_w ||
10550 old_other_state->crtc_h != new_other_state->crtc_h)
10553 /* Rotation / mirroring updates. */
10554 if (old_other_state->rotation != new_other_state->rotation)
10557 /* Blending updates. */
10558 if (old_other_state->pixel_blend_mode !=
10559 new_other_state->pixel_blend_mode)
10562 /* Alpha updates. */
10563 if (old_other_state->alpha != new_other_state->alpha)
10566 /* Colorspace changes. */
10567 if (old_other_state->color_range != new_other_state->color_range ||
10568 old_other_state->color_encoding != new_other_state->color_encoding)
10571 /* Framebuffer checks fall at the end. */
10572 if (!old_other_state->fb || !new_other_state->fb)
10575 /* Pixel format changes can require bandwidth updates. */
10576 if (old_other_state->fb->format != new_other_state->fb->format)
10579 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10580 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10582 /* Tiling and DCC changes also require bandwidth updates. */
10583 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10584 old_afb->base.modifier != new_afb->base.modifier)
10591 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10592 struct drm_plane_state *new_plane_state,
10593 struct drm_framebuffer *fb)
10595 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10596 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10597 unsigned int pitch;
10600 if (fb->width > new_acrtc->max_cursor_width ||
10601 fb->height > new_acrtc->max_cursor_height) {
10602 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10603 new_plane_state->fb->width,
10604 new_plane_state->fb->height);
10607 if (new_plane_state->src_w != fb->width << 16 ||
10608 new_plane_state->src_h != fb->height << 16) {
10609 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10613 /* Pitch in pixels */
10614 pitch = fb->pitches[0] / fb->format->cpp[0];
10616 if (fb->width != pitch) {
10617 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10626 /* FB pitch is supported by cursor plane */
10629 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10633 /* Core DRM takes care of checking FB modifiers, so we only need to
10634 * check tiling flags when the FB doesn't have a modifier. */
10635 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10636 if (adev->family < AMDGPU_FAMILY_AI) {
10637 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10638 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10639 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10641 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10644 DRM_DEBUG_ATOMIC("Cursor FB not linear");
10652 static int dm_update_plane_state(struct dc *dc,
10653 struct drm_atomic_state *state,
10654 struct drm_plane *plane,
10655 struct drm_plane_state *old_plane_state,
10656 struct drm_plane_state *new_plane_state,
10658 bool *lock_and_validation_needed)
10661 struct dm_atomic_state *dm_state = NULL;
10662 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10663 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10664 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10665 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10666 struct amdgpu_crtc *new_acrtc;
10671 new_plane_crtc = new_plane_state->crtc;
10672 old_plane_crtc = old_plane_state->crtc;
10673 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10674 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10676 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10677 if (!enable || !new_plane_crtc ||
10678 drm_atomic_plane_disabling(plane->state, new_plane_state))
10681 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10683 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10684 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10688 if (new_plane_state->fb) {
10689 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10690 new_plane_state->fb);
10698 needs_reset = should_reset_plane(state, plane, old_plane_state,
10701 /* Remove any changed/removed planes */
10706 if (!old_plane_crtc)
10709 old_crtc_state = drm_atomic_get_old_crtc_state(
10710 state, old_plane_crtc);
10711 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10713 if (!dm_old_crtc_state->stream)
10716 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10717 plane->base.id, old_plane_crtc->base.id);
10719 ret = dm_atomic_get_state(state, &dm_state);
10723 if (!dc_remove_plane_from_context(
10725 dm_old_crtc_state->stream,
10726 dm_old_plane_state->dc_state,
10727 dm_state->context)) {
10733 dc_plane_state_release(dm_old_plane_state->dc_state);
10734 dm_new_plane_state->dc_state = NULL;
10736 *lock_and_validation_needed = true;
10738 } else { /* Add new planes */
10739 struct dc_plane_state *dc_new_plane_state;
10741 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10744 if (!new_plane_crtc)
10747 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10748 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10750 if (!dm_new_crtc_state->stream)
10756 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10760 WARN_ON(dm_new_plane_state->dc_state);
10762 dc_new_plane_state = dc_create_plane_state(dc);
10763 if (!dc_new_plane_state)
10766 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10767 plane->base.id, new_plane_crtc->base.id);
10769 ret = fill_dc_plane_attributes(
10770 drm_to_adev(new_plane_crtc->dev),
10771 dc_new_plane_state,
10775 dc_plane_state_release(dc_new_plane_state);
10779 ret = dm_atomic_get_state(state, &dm_state);
10781 dc_plane_state_release(dc_new_plane_state);
10786 * Any atomic check errors that occur after this will
10787 * not need a release. The plane state will be attached
10788 * to the stream, and therefore part of the atomic
10789 * state. It'll be released when the atomic state is
10792 if (!dc_add_plane_to_context(
10794 dm_new_crtc_state->stream,
10795 dc_new_plane_state,
10796 dm_state->context)) {
10798 dc_plane_state_release(dc_new_plane_state);
10802 dm_new_plane_state->dc_state = dc_new_plane_state;
10804 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10806 /* Tell DC to do a full surface update every time there
10807 * is a plane change. Inefficient, but works for now.
10809 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10811 *lock_and_validation_needed = true;
10818 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10819 int *src_w, int *src_h)
10821 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10822 case DRM_MODE_ROTATE_90:
10823 case DRM_MODE_ROTATE_270:
10824 *src_w = plane_state->src_h >> 16;
10825 *src_h = plane_state->src_w >> 16;
10827 case DRM_MODE_ROTATE_0:
10828 case DRM_MODE_ROTATE_180:
10830 *src_w = plane_state->src_w >> 16;
10831 *src_h = plane_state->src_h >> 16;
10836 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10837 struct drm_crtc *crtc,
10838 struct drm_crtc_state *new_crtc_state)
10840 struct drm_plane *cursor = crtc->cursor, *underlying;
10841 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10843 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10844 int cursor_src_w, cursor_src_h;
10845 int underlying_src_w, underlying_src_h;
10847 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10848 * cursor per pipe but it's going to inherit the scaling and
10849 * positioning from the underlying pipe. Check the cursor plane's
10850 * blending properties match the underlying planes'. */
10852 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10853 if (!new_cursor_state || !new_cursor_state->fb) {
10857 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10858 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10859 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10861 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10862 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10863 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10866 /* Ignore disabled planes */
10867 if (!new_underlying_state->fb)
10870 dm_get_oriented_plane_size(new_underlying_state,
10871 &underlying_src_w, &underlying_src_h);
10872 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10873 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10875 if (cursor_scale_w != underlying_scale_w ||
10876 cursor_scale_h != underlying_scale_h) {
10877 drm_dbg_atomic(crtc->dev,
10878 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10879 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10883 /* If this plane covers the whole CRTC, no need to check planes underneath */
10884 if (new_underlying_state->crtc_x <= 0 &&
10885 new_underlying_state->crtc_y <= 0 &&
10886 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10887 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10894 #if defined(CONFIG_DRM_AMD_DC_DCN)
10895 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10897 struct drm_connector *connector;
10898 struct drm_connector_state *conn_state, *old_conn_state;
10899 struct amdgpu_dm_connector *aconnector = NULL;
10901 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10902 if (!conn_state->crtc)
10903 conn_state = old_conn_state;
10905 if (conn_state->crtc != crtc)
10908 aconnector = to_amdgpu_dm_connector(connector);
10909 if (!aconnector->port || !aconnector->mst_port)
10918 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10923 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10924 * @dev: The DRM device
10925 * @state: The atomic state to commit
10927 * Validate that the given atomic state is programmable by DC into hardware.
10928 * This involves constructing a &struct dc_state reflecting the new hardware
10929 * state we wish to commit, then querying DC to see if it is programmable. It's
10930 * important not to modify the existing DC state. Otherwise, atomic_check
10931 * may unexpectedly commit hardware changes.
10933 * When validating the DC state, it's important that the right locks are
10934 * acquired. For full updates case which removes/adds/updates streams on one
10935 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10936 * that any such full update commit will wait for completion of any outstanding
10937 * flip using DRMs synchronization events.
10939 * Note that DM adds the affected connectors for all CRTCs in state, when that
10940 * might not seem necessary. This is because DC stream creation requires the
10941 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10942 * be possible but non-trivial - a possible TODO item.
10944 * Return: -Error code if validation failed.
10946 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10947 struct drm_atomic_state *state)
10949 struct amdgpu_device *adev = drm_to_adev(dev);
10950 struct dm_atomic_state *dm_state = NULL;
10951 struct dc *dc = adev->dm.dc;
10952 struct drm_connector *connector;
10953 struct drm_connector_state *old_con_state, *new_con_state;
10954 struct drm_crtc *crtc;
10955 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10956 struct drm_plane *plane;
10957 struct drm_plane_state *old_plane_state, *new_plane_state;
10958 enum dc_status status;
10960 bool lock_and_validation_needed = false;
10961 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10962 #if defined(CONFIG_DRM_AMD_DC_DCN)
10963 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10964 struct drm_dp_mst_topology_state *mst_state;
10965 struct drm_dp_mst_topology_mgr *mgr;
10968 trace_amdgpu_dm_atomic_check_begin(state);
10970 ret = drm_atomic_helper_check_modeset(dev, state);
10972 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10976 /* Check connector changes */
10977 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10978 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10979 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10981 /* Skip connectors that are disabled or part of modeset already. */
10982 if (!old_con_state->crtc && !new_con_state->crtc)
10985 if (!new_con_state->crtc)
10988 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10989 if (IS_ERR(new_crtc_state)) {
10990 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10991 ret = PTR_ERR(new_crtc_state);
10995 if (dm_old_con_state->abm_level !=
10996 dm_new_con_state->abm_level)
10997 new_crtc_state->connectors_changed = true;
11000 #if defined(CONFIG_DRM_AMD_DC_DCN)
11001 if (dc_resource_is_dsc_encoding_supported(dc)) {
11002 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11003 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11004 ret = add_affected_mst_dsc_crtcs(state, crtc);
11006 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11011 pre_validate_dsc(state, &dm_state, vars);
11014 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11015 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11017 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11018 !new_crtc_state->color_mgmt_changed &&
11019 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11020 dm_old_crtc_state->dsc_force_changed == false)
11023 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11025 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11029 if (!new_crtc_state->enable)
11032 ret = drm_atomic_add_affected_connectors(state, crtc);
11034 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11038 ret = drm_atomic_add_affected_planes(state, crtc);
11040 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11044 if (dm_old_crtc_state->dsc_force_changed)
11045 new_crtc_state->mode_changed = true;
11049 * Add all primary and overlay planes on the CRTC to the state
11050 * whenever a plane is enabled to maintain correct z-ordering
11051 * and to enable fast surface updates.
11053 drm_for_each_crtc(crtc, dev) {
11054 bool modified = false;
11056 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11057 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11060 if (new_plane_state->crtc == crtc ||
11061 old_plane_state->crtc == crtc) {
11070 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11071 if (plane->type == DRM_PLANE_TYPE_CURSOR)
11075 drm_atomic_get_plane_state(state, plane);
11077 if (IS_ERR(new_plane_state)) {
11078 ret = PTR_ERR(new_plane_state);
11079 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11085 /* Remove exiting planes if they are modified */
11086 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11087 ret = dm_update_plane_state(dc, state, plane,
11091 &lock_and_validation_needed);
11093 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11098 /* Disable all crtcs which require disable */
11099 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11100 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11104 &lock_and_validation_needed);
11106 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11111 /* Enable all crtcs which require enable */
11112 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11113 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11117 &lock_and_validation_needed);
11119 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11124 /* Add new/modified planes */
11125 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11126 ret = dm_update_plane_state(dc, state, plane,
11130 &lock_and_validation_needed);
11132 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11137 /* Run this here since we want to validate the streams we created */
11138 ret = drm_atomic_helper_check_planes(dev, state);
11140 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11144 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11145 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11146 if (dm_new_crtc_state->mpo_requested)
11147 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11150 /* Check cursor planes scaling */
11151 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11152 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11154 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11159 if (state->legacy_cursor_update) {
11161 * This is a fast cursor update coming from the plane update
11162 * helper, check if it can be done asynchronously for better
11165 state->async_update =
11166 !drm_atomic_helper_async_check(dev, state);
11169 * Skip the remaining global validation if this is an async
11170 * update. Cursor updates can be done without affecting
11171 * state or bandwidth calcs and this avoids the performance
11172 * penalty of locking the private state object and
11173 * allocating a new dc_state.
11175 if (state->async_update)
11179 /* Check scaling and underscan changes*/
11180 /* TODO Removed scaling changes validation due to inability to commit
11181 * new stream into context w\o causing full reset. Need to
11182 * decide how to handle.
11184 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11185 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11186 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11187 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11189 /* Skip any modesets/resets */
11190 if (!acrtc || drm_atomic_crtc_needs_modeset(
11191 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11194 /* Skip any thing not scale or underscan changes */
11195 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11198 lock_and_validation_needed = true;
11201 #if defined(CONFIG_DRM_AMD_DC_DCN)
11202 /* set the slot info for each mst_state based on the link encoding format */
11203 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11204 struct amdgpu_dm_connector *aconnector;
11205 struct drm_connector *connector;
11206 struct drm_connector_list_iter iter;
11207 u8 link_coding_cap;
11209 if (!mgr->mst_state )
11212 drm_connector_list_iter_begin(dev, &iter);
11213 drm_for_each_connector_iter(connector, &iter) {
11214 int id = connector->index;
11216 if (id == mst_state->mgr->conn_base_id) {
11217 aconnector = to_amdgpu_dm_connector(connector);
11218 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11219 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11224 drm_connector_list_iter_end(&iter);
11229 * Streams and planes are reset when there are changes that affect
11230 * bandwidth. Anything that affects bandwidth needs to go through
11231 * DC global validation to ensure that the configuration can be applied
11234 * We have to currently stall out here in atomic_check for outstanding
11235 * commits to finish in this case because our IRQ handlers reference
11236 * DRM state directly - we can end up disabling interrupts too early
11239 * TODO: Remove this stall and drop DM state private objects.
11241 if (lock_and_validation_needed) {
11242 ret = dm_atomic_get_state(state, &dm_state);
11244 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11248 ret = do_aquire_global_lock(dev, state);
11250 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11254 #if defined(CONFIG_DRM_AMD_DC_DCN)
11255 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11256 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11260 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11262 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11268 * Perform validation of MST topology in the state:
11269 * We need to perform MST atomic check before calling
11270 * dc_validate_global_state(), or there is a chance
11271 * to get stuck in an infinite loop and hang eventually.
11273 ret = drm_dp_mst_atomic_check(state);
11275 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11278 status = dc_validate_global_state(dc, dm_state->context, true);
11279 if (status != DC_OK) {
11280 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11281 dc_status_to_str(status), status);
11287 * The commit is a fast update. Fast updates shouldn't change
11288 * the DC context, affect global validation, and can have their
11289 * commit work done in parallel with other commits not touching
11290 * the same resource. If we have a new DC context as part of
11291 * the DM atomic state from validation we need to free it and
11292 * retain the existing one instead.
11294 * Furthermore, since the DM atomic state only contains the DC
11295 * context and can safely be annulled, we can free the state
11296 * and clear the associated private object now to free
11297 * some memory and avoid a possible use-after-free later.
11300 for (i = 0; i < state->num_private_objs; i++) {
11301 struct drm_private_obj *obj = state->private_objs[i].ptr;
11303 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11304 int j = state->num_private_objs-1;
11306 dm_atomic_destroy_state(obj,
11307 state->private_objs[i].state);
11309 /* If i is not at the end of the array then the
11310 * last element needs to be moved to where i was
11311 * before the array can safely be truncated.
11314 state->private_objs[i] =
11315 state->private_objs[j];
11317 state->private_objs[j].ptr = NULL;
11318 state->private_objs[j].state = NULL;
11319 state->private_objs[j].old_state = NULL;
11320 state->private_objs[j].new_state = NULL;
11322 state->num_private_objs = j;
11328 /* Store the overall update type for use later in atomic check. */
11329 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11330 struct dm_crtc_state *dm_new_crtc_state =
11331 to_dm_crtc_state(new_crtc_state);
11333 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11338 /* Must be success */
11341 trace_amdgpu_dm_atomic_check_finish(state, ret);
11346 if (ret == -EDEADLK)
11347 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11348 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11349 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11351 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11353 trace_amdgpu_dm_atomic_check_finish(state, ret);
11358 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11359 struct amdgpu_dm_connector *amdgpu_dm_connector)
11362 bool capable = false;
11364 if (amdgpu_dm_connector->dc_link &&
11365 dm_helpers_dp_read_dpcd(
11367 amdgpu_dm_connector->dc_link,
11368 DP_DOWN_STREAM_PORT_COUNT,
11370 sizeof(dpcd_data))) {
11371 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11377 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11378 unsigned int offset,
11379 unsigned int total_length,
11381 unsigned int length,
11382 struct amdgpu_hdmi_vsdb_info *vsdb)
11385 union dmub_rb_cmd cmd;
11386 struct dmub_cmd_send_edid_cea *input;
11387 struct dmub_cmd_edid_cea_output *output;
11389 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11392 memset(&cmd, 0, sizeof(cmd));
11394 input = &cmd.edid_cea.data.input;
11396 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11397 cmd.edid_cea.header.sub_type = 0;
11398 cmd.edid_cea.header.payload_bytes =
11399 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11400 input->offset = offset;
11401 input->length = length;
11402 input->cea_total_length = total_length;
11403 memcpy(input->payload, data, length);
11405 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11407 DRM_ERROR("EDID CEA parser failed\n");
11411 output = &cmd.edid_cea.data.output;
11413 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11414 if (!output->ack.success) {
11415 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11416 output->ack.offset);
11418 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11419 if (!output->amd_vsdb.vsdb_found)
11422 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11423 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11424 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11425 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11427 DRM_WARN("Unknown EDID CEA parser results\n");
11434 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11435 uint8_t *edid_ext, int len,
11436 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11440 /* send extension block to DMCU for parsing */
11441 for (i = 0; i < len; i += 8) {
11445 /* send 8 bytes a time */
11446 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11450 /* EDID block sent completed, expect result */
11451 int version, min_rate, max_rate;
11453 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11455 /* amd vsdb found */
11456 vsdb_info->freesync_supported = 1;
11457 vsdb_info->amd_vsdb_version = version;
11458 vsdb_info->min_refresh_rate_hz = min_rate;
11459 vsdb_info->max_refresh_rate_hz = max_rate;
11467 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11475 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11476 uint8_t *edid_ext, int len,
11477 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11481 /* send extension block to DMCU for parsing */
11482 for (i = 0; i < len; i += 8) {
11483 /* send 8 bytes a time */
11484 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11488 return vsdb_info->freesync_supported;
11491 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11492 uint8_t *edid_ext, int len,
11493 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11495 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11497 if (adev->dm.dmub_srv)
11498 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11500 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11503 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11504 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11506 uint8_t *edid_ext = NULL;
11508 bool valid_vsdb_found = false;
11510 /*----- drm_find_cea_extension() -----*/
11511 /* No EDID or EDID extensions */
11512 if (edid == NULL || edid->extensions == 0)
11515 /* Find CEA extension */
11516 for (i = 0; i < edid->extensions; i++) {
11517 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11518 if (edid_ext[0] == CEA_EXT)
11522 if (i == edid->extensions)
11525 /*----- cea_db_offsets() -----*/
11526 if (edid_ext[0] != CEA_EXT)
11529 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11531 return valid_vsdb_found ? i : -ENODEV;
11534 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11538 struct detailed_timing *timing;
11539 struct detailed_non_pixel *data;
11540 struct detailed_data_monitor_range *range;
11541 struct amdgpu_dm_connector *amdgpu_dm_connector =
11542 to_amdgpu_dm_connector(connector);
11543 struct dm_connector_state *dm_con_state = NULL;
11544 struct dc_sink *sink;
11546 struct drm_device *dev = connector->dev;
11547 struct amdgpu_device *adev = drm_to_adev(dev);
11548 bool freesync_capable = false;
11549 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11551 if (!connector->state) {
11552 DRM_ERROR("%s - Connector has no state", __func__);
11556 sink = amdgpu_dm_connector->dc_sink ?
11557 amdgpu_dm_connector->dc_sink :
11558 amdgpu_dm_connector->dc_em_sink;
11560 if (!edid || !sink) {
11561 dm_con_state = to_dm_connector_state(connector->state);
11563 amdgpu_dm_connector->min_vfreq = 0;
11564 amdgpu_dm_connector->max_vfreq = 0;
11565 amdgpu_dm_connector->pixel_clock_mhz = 0;
11566 connector->display_info.monitor_range.min_vfreq = 0;
11567 connector->display_info.monitor_range.max_vfreq = 0;
11568 freesync_capable = false;
11573 dm_con_state = to_dm_connector_state(connector->state);
11575 if (!adev->dm.freesync_module)
11579 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11580 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11581 bool edid_check_required = false;
11584 edid_check_required = is_dp_capable_without_timing_msa(
11586 amdgpu_dm_connector);
11589 if (edid_check_required == true && (edid->version > 1 ||
11590 (edid->version == 1 && edid->revision > 1))) {
11591 for (i = 0; i < 4; i++) {
11593 timing = &edid->detailed_timings[i];
11594 data = &timing->data.other_data;
11595 range = &data->data.range;
11597 * Check if monitor has continuous frequency mode
11599 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11602 * Check for flag range limits only. If flag == 1 then
11603 * no additional timing information provided.
11604 * Default GTF, GTF Secondary curve and CVT are not
11607 if (range->flags != 1)
11610 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11611 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11612 amdgpu_dm_connector->pixel_clock_mhz =
11613 range->pixel_clock_mhz * 10;
11615 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11616 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11621 if (amdgpu_dm_connector->max_vfreq -
11622 amdgpu_dm_connector->min_vfreq > 10) {
11624 freesync_capable = true;
11627 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11628 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11629 if (i >= 0 && vsdb_info.freesync_supported) {
11630 timing = &edid->detailed_timings[i];
11631 data = &timing->data.other_data;
11633 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11634 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11635 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11636 freesync_capable = true;
11638 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11639 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11645 dm_con_state->freesync_capable = freesync_capable;
11647 if (connector->vrr_capable_property)
11648 drm_connector_set_vrr_capable_property(connector,
11652 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11654 struct amdgpu_device *adev = drm_to_adev(dev);
11655 struct dc *dc = adev->dm.dc;
11658 mutex_lock(&adev->dm.dc_lock);
11659 if (dc->current_state) {
11660 for (i = 0; i < dc->current_state->stream_count; ++i)
11661 dc->current_state->streams[i]
11662 ->triggered_crtc_reset.enabled =
11663 adev->dm.force_timing_sync;
11665 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11666 dc_trigger_sync(dc, dc->current_state);
11668 mutex_unlock(&adev->dm.dc_lock);
11671 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11672 uint32_t value, const char *func_name)
11674 #ifdef DM_CHECK_ADDR_0
11675 if (address == 0) {
11676 DC_ERR("invalid register write. address = 0");
11680 cgs_write_register(ctx->cgs_device, address, value);
11681 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11684 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11685 const char *func_name)
11688 #ifdef DM_CHECK_ADDR_0
11689 if (address == 0) {
11690 DC_ERR("invalid register read; address = 0\n");
11695 if (ctx->dmub_srv &&
11696 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11697 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11702 value = cgs_read_register(ctx->cgs_device, address);
11704 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11709 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11710 struct dc_context *ctx,
11711 uint8_t status_type,
11712 uint32_t *operation_result)
11714 struct amdgpu_device *adev = ctx->driver_context;
11715 int return_status = -1;
11716 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11719 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11720 return_status = p_notify->aux_reply.length;
11721 *operation_result = p_notify->result;
11722 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11723 *operation_result = AUX_RET_ERROR_TIMEOUT;
11724 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11725 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11727 *operation_result = AUX_RET_ERROR_UNKNOWN;
11730 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11732 *operation_result = p_notify->sc_status;
11734 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11738 return return_status;
11741 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11742 unsigned int link_index, void *cmd_payload, void *operation_result)
11744 struct amdgpu_device *adev = ctx->driver_context;
11748 dc_process_dmub_aux_transfer_async(ctx->dc,
11749 link_index, (struct aux_payload *)cmd_payload);
11750 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11751 (struct set_config_cmd_payload *)cmd_payload,
11752 adev->dm.dmub_notify)) {
11753 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11754 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11755 (uint32_t *)operation_result);
11758 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11760 DRM_ERROR("wait_for_completion_timeout timeout!");
11761 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11762 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11763 (uint32_t *)operation_result);
11767 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11768 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11770 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11771 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11772 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11773 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11774 adev->dm.dmub_notify->aux_reply.length);
11779 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11780 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11781 (uint32_t *)operation_result);
11785 * Check whether seamless boot is supported.
11787 * So far we only support seamless boot on CHIP_VANGOGH.
11788 * If everything goes well, we may consider expanding
11789 * seamless boot to other ASICs.
11791 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11793 switch (adev->asic_type) {
11795 if (!adev->mman.keep_stolen_vga_memory)