2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
63 #include "amdgpu_dm_psr.h"
65 #include "ivsrcid/ivsrcid_vislands30.h"
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
94 #include "soc15_common.h"
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
/* DMUB (display microcontroller unit B) firmware images, one per ASIC. */
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

/* Legacy DMCU firmware for ASICs that predate DMUB. */
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
133 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135 * requests into DC requests, and DC responses into DRM responses.
137 * The root control structure is &struct amdgpu_display_manager.
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
/*
 * get_subconnector_type - map the DC dongle type detected on a DP link to
 * the DRM subconnector enum reported to userspace.
 */
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	/* DP-to-analog converter presents as VGA. */
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	/* Mismatched dongle: no reliable mapping. */
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
		return DRM_MODE_SUBCONNECTOR_Unknown;
/*
 * update_subconnector_property - refresh the DP "subconnector" property on
 * a connector after detection, based on the dongle type seen by DC.
 */
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	/* The property only exists on DisplayPort connectors. */
	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)

	/* Only report a real subconnector while a sink is attached. */
	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
182 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
184 * drm_encoder, drm_mode_config
186 * Returns 0 on success
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* removes and deallocates the drm structures, created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 struct drm_plane *plane,
194 unsigned long possible_crtcs,
195 const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 struct drm_plane *plane,
198 uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 struct amdgpu_dm_connector *amdgpu_dm_connector,
202 struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 struct amdgpu_encoder *aencoder,
205 uint32_t link_index);
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 struct drm_atomic_state *state);
214 static void handle_cursor_update(struct drm_plane *plane,
215 struct drm_plane_state *old_plane_state);
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225 struct drm_crtc_state *new_crtc_state);
227 * dm_vblank_get_counter
230 * Get counter for number of vertical blanks
233 * struct amdgpu_device *adev - [in] desired amdgpu device
234 * int disp_idx - [in] which CRTC to get the counter from
237 * Counter for vertical blanks
/* Return the hardware vblank counter for @crtc (read via the DC stream). */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
	/* Reject out-of-range CRTC indices. */
	if (crtc >= adev->mode_info.num_crtc)

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

	/* No stream bound yet (e.g. CRTC disabled): nothing to read. */
	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
/*
 * dm_crtc_get_scanoutpos - read back the current scanout position and the
 * vblank extents for @crtc, packed into the legacy register layout that the
 * base driver expects (*position = v | h<<16, *vbl = start | end<<16).
 */
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);
/* amd_ip_funcs stubs: the DM IP block has no idle/soft-reset handling. */
static bool dm_is_idle(void *handle)

static int dm_wait_for_idle(void *handle)

static bool dm_check_soft_reset(void *handle)

static int dm_soft_reset(void *handle)
/*
 * get_crtc_by_otg_inst - find the amdgpu_crtc driven by the given OTG
 * (output timing generator) instance, as recovered from an IRQ source.
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/* Unknown OTG instance: warn and fall back to the first CRTC. */
	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
335 return acrtc->dm_irq_params.freesync_config.state ==
336 VRR_STATE_ACTIVE_VARIABLE ||
337 acrtc->dm_irq_params.freesync_config.state ==
338 VRR_STATE_ACTIVE_FIXED;
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
343 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
/*
 * is_dc_timing_adjust_needed - decide whether DC must reprogram vmin/vmax
 * on the state transition: always needed when entering fixed-rate VRR, and
 * whenever the VRR active state changes between old and new.
 */
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
359 * dm_pflip_high_irq() - Handle pageflip interrupt
360 * @interrupt_params: ignored
362 * Handles the pageflip interrupt by notifying all interested parties
363 * that the pageflip has been completed.
static void dm_pflip_high_irq(void *interrupt_params)
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;

	/* The IRQ source encodes the CRTC instance; recover the amdgpu_crtc. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	/* Spurious interrupt: no flip is pending on this CRTC. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);

		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
/*
 * dm_vupdate_high_irq - VUPDATE interrupt handler, runs after the end of
 * front-porch. In VRR mode this is where core vblank handling is deferred
 * to, so that timestamps are computed outside the variable front-porch.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
	drm_dev = acrtc->base.dev;
	vblank = &drm_dev->vblank[acrtc->base.index];
	previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
	/* Measured frame time = delta between the last two vblank timestamps. */
	frame_duration_ns = vblank->time - previous_timestamp;

	if (frame_duration_ns > 0) {
		/* Trace the effective refresh rate for debugging. */
		trace_amdgpu_refresh_rate_track(acrtc->base.index,
				ktime_divns(NSEC_PER_SEC, frame_duration_ns));
		atomic64_set(&irq_params->previous_timestamp, vblank->time);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",

	/* Core vblank handling is done here after end of front-porch in
	 * vrr mode, as vblank timestamping will give valid results
	 * while now done after front-porch. This will also deliver
	 * page-flip completion events that have been queued to us
	 * if a pageflip happened inside front-porch.
	 */
		drm_crtc_handle_vblank(&acrtc->base);

		/* BTR processing for pre-DCE12 ASICs */
		if (acrtc->dm_irq_params.stream &&
		    adev->family < AMDGPU_FAMILY_AI) {
			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc->dm_irq_params.stream,
				&acrtc->dm_irq_params.vrr_params);

			dc_stream_adjust_vmin_vmax(
				acrtc->dm_irq_params.stream,
				&acrtc->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
522 * dm_crtc_high_irq() - Handles CRTC interrupt
523 * @interrupt_params: used for determining the CRTC instance
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
static void dm_crtc_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	/* Variable-rate VRR: run below-the-range/freesync processing now. */
	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);

			drm_crtc_vblank_put(&acrtc->base);
		acrtc->pflip_status = AMDGPU_FLIP_NONE;

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
605 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606 * DCN generation ASICs
607 * @interrupt_params: interrupt parameters
609 * Used to set crc window/read out crc value at vertical line 0 position
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	/* Hand off to the secure-display CRC window handler at vline 0. */
	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
627 * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
628 * @adev: amdgpu_device pointer
629 * @notify: dmub notification structure
631 * Dmub AUX or SET_CONFIG command completion processing callback
632 * Copies dmub notification to DM which is to be read by AUX command.
633 * issuing thread and also signals the event to wake up the thread.
635 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
637 if (adev->dm.dmub_notify)
638 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
639 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
640 complete(&adev->dm.dmub_aux_transfer_done);
644 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
645 * @adev: amdgpu_device pointer
646 * @notify: dmub notification structure
648 * Dmub Hpd interrupt processing callback. Gets displayindex through the
 * link index and calls the helper to do the processing.
651 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
653 struct amdgpu_dm_connector *aconnector;
654 struct amdgpu_dm_connector *hpd_aconnector = NULL;
655 struct drm_connector *connector;
656 struct drm_connector_list_iter iter;
657 struct dc_link *link;
658 uint8_t link_index = 0;
659 struct drm_device *dev = adev->dm.ddev;
664 if (notify == NULL) {
665 DRM_ERROR("DMUB HPD callback notification was NULL");
669 if (notify->link_index > adev->dm.dc->link_count) {
670 DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
674 link_index = notify->link_index;
675 link = adev->dm.dc->links[link_index];
677 drm_connector_list_iter_begin(dev, &iter);
678 drm_for_each_connector_iter(connector, &iter) {
679 aconnector = to_amdgpu_dm_connector(connector);
680 if (link && aconnector->dc_link == link) {
681 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
682 hpd_aconnector = aconnector;
686 drm_connector_list_iter_end(&iter);
688 if (hpd_aconnector) {
689 if (notify->type == DMUB_NOTIFICATION_HPD)
690 handle_hpd_irq_helper(hpd_aconnector);
691 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
692 handle_hpd_rx_irq(hpd_aconnector);
697 * register_dmub_notify_callback - Sets callback for DMUB notify
698 * @adev: amdgpu_device pointer
699 * @type: Type of dmub notification
700 * @callback: Dmub interrupt callback function
701 * @dmub_int_thread_offload: offload indicator
703 * API to register a dmub callback handler for a dmub notification
704 * Also sets indicator whether callback processing to be offloaded.
705 * to dmub interrupt handling thread
706 * Return: true if successfully registered, false if there is existing registration
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
	/* Only accept a non-NULL handler for a type we can index into the
	 * per-type callback/offload tables.
	 */
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
/* Work handler: run a DMUB notification callback that was offloaded from
 * IRQ context, then free the copied notification.
 */
static void dm_handle_hpd_work(struct work_struct *work)
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");

	/* Dispatch to the handler registered for this notification type. */
	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);

	kfree(dmub_hpd_wrk->dmub_notify);
741 #define DMUB_TRACE_MAX_READ 64
743 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
744 * @interrupt_params: used for determining the Outbox instance
746 * Handles the Outbox Interrupt
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	/* Drain pending DMUB notifications when the outbox IRQ fired. */
	if (dc_enable_dmub_notifications(adev->dm.dc) &&
		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			/* NOTE(review): this bound looks off by one — a type equal
			 * to ARRAY_SIZE() would index one past dmub_thread_offload[]
			 * and dmub_callback[]; this likely should be ">=". Confirm.
			 */
			if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
			/* Offloaded types run from a workqueue with a copied
			 * notification; others are called inline (atomic ctx).
			 */
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				if (dmub_hpd_wrk->dmub_notify)
					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
						DP_HPD_PLUG ? true : false;
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
				dm->dmub_callback[notify.type](adev, &notify);
		} while (notify.pending_notification);

		/* Drain the DMUB trace buffer into the kernel trace ring. */
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
822 #endif /* CONFIG_DRM_AMD_DC_DCN */
/* Clock/power gating for the display block is handled elsewhere: no-ops. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
836 /* Prototypes of private functions */
837 static int dm_early_init(void* handle);
839 /* Allocate memory for FBC compressed data */
/* Allocate the FBC (frame buffer compression) scratch buffer for an eDP
 * panel, sized for the largest advertised mode (4 bytes per pixel).
 */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	/* FBC requires compressor hardware support in DC. */
	if (adev->dm.dc->fbc_compressor == NULL)

	/* Only eDP panels use FBC. */
	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)

	/* Buffer already allocated. */
	if (compressor->bo_ptr)

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;

		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

			DRM_ERROR("DM: Failed to initialize FBC\n");
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
/* drm_audio_component op: fetch the ELD for the connector on audio @port. */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		/* Skip connectors not routed to the requested audio pin. */
		if (aconnector->audio_inst != port)

		ret = drm_eld_size(connector->eld);
		/* Copy at most max_bytes of the ELD into the caller's buffer. */
		memcpy(buf, connector->eld, min(max_bytes, ret));

	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
/* Ops exposed through the generic drm_audio_component (used by snd-hda). */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
/* Component-framework bind: publish our audio ops to the HDA driver. */
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;

	adev->dm.audio_component = acomp;
/* Component-framework unbind: detach the audio component from DM. */
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	adev->dm.audio_component = NULL;
/* Bind/unbind hooks registered with the device component framework. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
/* Initialize the mode_info audio pin state from DC's resource pool and
 * register with the component framework so snd-hda can bind to us.
 */
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	/* Mark every pin unconfigured/disconnected until an ELD arrives. */
	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);

	adev->dm.audio_registered = true;
/* Undo amdgpu_dm_audio_init(): deregister the component and disable audio. */
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
	if (!adev->mode_info.audio.enabled)

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
/* Notify the bound HDA driver that the ELD on @pin changed. */
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
	struct drm_audio_component *acomp = adev->dm.audio_component;

	/* Only if an audio component is bound and it cares about ELD events. */
	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
/* Copy the DMUB firmware windows into framebuffer memory and bring up the
 * DMUB service hardware (plus DMCU/ABM when present).
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;
	struct dc *dc = adev->dm.dc;

	/* DMUB isn't supported on the ASIC. */

		DRM_ERROR("No framebuffer info for DMUB service.\n");

		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Locate the inst/const and bss/data sections inside the fw image. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	/* Per-ASIC DMUB feature flags. */
	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
			hw_params.dpia_supported = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);
1156 #if defined(CONFIG_DRM_AMD_DC_DCN)
1157 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1160 uint32_t logical_addr_low;
1161 uint32_t logical_addr_high;
1162 uint32_t agp_base, agp_bot, agp_top;
1163 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1165 memset(pa_config, 0, sizeof(*pa_config));
1167 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1168 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1170 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1172 * Raven2 has a HW issue that it is unable to use the vram which
1173 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1174 * workaround that increase system aperture high address (add 1)
1175 * to get rid of the VM fault and hardware hang.
1177 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1179 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1182 agp_bot = adev->gmc.agp_start >> 24;
1183 agp_top = adev->gmc.agp_end >> 24;
1186 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1187 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1188 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1189 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1190 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1191 page_table_base.low_part = lower_32_bits(pt_base);
1193 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1194 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1196 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1197 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1198 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1200 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1201 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1202 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1204 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1205 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1206 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1208 pa_config->is_hvm_enabled = 0;
1212 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * vblank_control_worker() - Deferred vblank enable/disable bookkeeping.
 *
 * Runs under dm->dc_lock: maintains the count of CRTCs that currently
 * need vblank IRQs, allows DC idle optimizations (MALL) only when that
 * count drops to zero, and toggles PSR on the stream's link to match
 * the OS vblank requirement.
 */
1213 static void vblank_control_worker(struct work_struct *work)
1215 struct vblank_control_work *vblank_work =
1216 container_of(work, struct vblank_control_work, work);
1217 struct amdgpu_display_manager *dm = vblank_work->dm;
1219 mutex_lock(&dm->dc_lock);
1221 if (vblank_work->enable)
1222 dm->active_vblank_irq_count++;
1223 else if(dm->active_vblank_irq_count)
1224 dm->active_vblank_irq_count--;
/* Idle optimizations are only safe when no CRTC needs vblank IRQs. */
1226 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1228 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1230 /* Control PSR based on vblank requirements from OS */
1231 if (vblank_work->stream && vblank_work->stream->link) {
1232 if (vblank_work->enable) {
/* Vblank wanted: PSR must be off while it is active. */
1233 if (vblank_work->stream->link->psr_settings.psr_allow_active)
1234 amdgpu_dm_psr_disable(vblank_work->stream);
1235 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1236 !vblank_work->stream->link->psr_settings.psr_allow_active &&
1237 vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
/* Vblank no longer needed: re-enter PSR if the link permits it. */
1238 amdgpu_dm_psr_enable(vblank_work->stream);
1242 mutex_unlock(&dm->dc_lock);
/* Drop the reference taken when this work item was queued. */
1244 dc_stream_release(vblank_work->stream);
/*
 * dm_handle_hpd_rx_offload_work() - Deferred HPD RX IRQ processing.
 *
 * Re-detects the sink under the connector's hpd_lock, then (outside a
 * GPU reset, under dm.dc_lock) either runs DP automated-test handling
 * or, for a non-eDP link that reports link loss, restores the link and
 * clears the queue's is_handling_link_loss flag.
 */
1251 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1253 struct hpd_rx_irq_offload_work *offload_work;
1254 struct amdgpu_dm_connector *aconnector;
1255 struct dc_link *dc_link;
1256 struct amdgpu_device *adev;
1257 enum dc_connection_type new_connection_type = dc_connection_none;
1258 unsigned long flags;
1260 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1261 aconnector = offload_work->offload_wq->aconnector;
1264 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1268 adev = drm_to_adev(aconnector->base.dev);
1269 dc_link = aconnector->dc_link;
1271 mutex_lock(&aconnector->hpd_lock);
1272 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1273 DRM_ERROR("KMS: Failed to detect connector\n");
1274 mutex_unlock(&aconnector->hpd_lock);
/* Nothing to service if the sink is gone or a reset is in flight. */
1276 if (new_connection_type == dc_connection_none)
1279 if (amdgpu_in_reset(adev))
1282 mutex_lock(&adev->dm.dc_lock);
1283 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1284 dc_link_dp_handle_automated_test(dc_link);
1285 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1286 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1287 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1288 dc_link_dp_handle_link_loss(dc_link);
/* Allow the next link-loss IRQ to queue new offload work. */
1289 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1290 offload_work->offload_wq->is_handling_link_loss = false;
1291 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1293 mutex_unlock(&adev->dm.dc_lock);
/* Work item owns its allocation; free it once handled. */
1296 kfree(offload_work);
/*
 * hpd_rx_irq_create_workqueue() - Allocate per-link HPD RX offload queues.
 *
 * One entry (with its own single-threaded workqueue and spinlock) per
 * DC link, up to dc->caps.max_links.  Returns the array, or NULL on
 * allocation failure.
 */
1300 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1302 int max_caps = dc->caps.max_links;
1304 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1306 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1308 if (!hpd_rx_offload_wq)
1312 for (i = 0; i < max_caps; i++) {
1313 hpd_rx_offload_wq[i].wq =
1314 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1316 if (hpd_rx_offload_wq[i].wq == NULL) {
1317 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
/* offload_lock guards is_handling_link_loss in IRQ vs. worker context. */
1321 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1324 return hpd_rx_offload_wq;
/* PCI vendor/device/subsystem/revision match for boards needing stutter off. */
1327 struct amdgpu_stutter_quirk {
/* Table is walked until a zero chip_device entry (see dm_should_disable_stutter). */
1335 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1336 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1337 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
/*
 * dm_should_disable_stutter() - Check the PCI quirk table for this device.
 *
 * Returns true when the device's vendor/device/subsystem IDs and
 * revision all match an entry in amdgpu_stutter_quirk_list.
 */
1341 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1343 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1345 while (p && p->chip_device != 0) {
1346 if (pdev->vendor == p->chip_vendor &&
1347 pdev->device == p->chip_device &&
1348 pdev->subsystem_vendor == p->subsys_vendor &&
1349 pdev->subsystem_device == p->subsys_device &&
1350 pdev->revision == p->revision) {
/*
 * amdgpu_dm_init() - Create and bring up the display manager.
 *
 * Initializes locks and DM IRQ support, fills dc_init_data from the
 * adapter (ASIC id, VBIOS, CGS device), applies feature/debug module
 * parameter overrides, creates the DC core and DMUB hardware, then the
 * helper modules (freesync, color management, HDCP, DMUB notification
 * callbacks) and finally the DRM-side device state.  Failures unwind
 * through amdgpu_dm_fini().
 */
1358 static int amdgpu_dm_init(struct amdgpu_device *adev)
1360 struct dc_init_data init_data;
1361 #ifdef CONFIG_DRM_AMD_DC_HDCP
1362 struct dc_callback_init init_params;
1366 adev->dm.ddev = adev_to_drm(adev);
1367 adev->dm.adev = adev;
1369 /* Zero all the fields */
1370 memset(&init_data, 0, sizeof(init_data));
1371 #ifdef CONFIG_DRM_AMD_DC_HDCP
1372 memset(&init_params, 0, sizeof(init_params));
1375 mutex_init(&adev->dm.dc_lock);
1376 mutex_init(&adev->dm.audio_lock);
1377 #if defined(CONFIG_DRM_AMD_DC_DCN)
1378 spin_lock_init(&adev->dm.vblank_lock);
1381 if(amdgpu_dm_irq_init(adev)) {
1382 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
/* Describe the ASIC to DC. */
1386 init_data.asic_id.chip_family = adev->family;
1388 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1389 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1390 init_data.asic_id.chip_id = adev->pdev->device;
1392 init_data.asic_id.vram_width = adev->gmc.vram_width;
1393 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1394 init_data.asic_id.atombios_base_address =
1395 adev->mode_info.atom_context->bios;
1397 init_data.driver = adev;
1399 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1401 if (!adev->dm.cgs_device) {
1402 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1406 init_data.cgs_device = adev->dm.cgs_device;
1408 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
/* Per-ASIC / per-DCE-IP feature flag selection. */
1410 switch (adev->asic_type) {
1413 init_data.flags.gpu_vm_support = true;
1416 switch (adev->ip_versions[DCE_HWIP][0]) {
1417 case IP_VERSION(2, 1, 0):
1418 init_data.flags.gpu_vm_support = true;
/* Renoir: only specific DMCUB firmware versions keep DMCU enabled. */
1419 switch (adev->dm.dmcub_fw_version) {
1420 case 0: /* development */
1421 case 0x1: /* linux-firmware.git hash 6d9f399 */
1422 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1423 init_data.flags.disable_dmcu = false;
1426 init_data.flags.disable_dmcu = true;
1429 case IP_VERSION(1, 0, 0):
1430 case IP_VERSION(1, 0, 1):
1431 case IP_VERSION(3, 0, 1):
1432 case IP_VERSION(3, 1, 2):
1433 case IP_VERSION(3, 1, 3):
1434 init_data.flags.gpu_vm_support = true;
1436 case IP_VERSION(2, 0, 3):
1437 init_data.flags.disable_dmcu = true;
/* Module-parameter feature mask overrides. */
1445 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1446 init_data.flags.fbc_support = true;
1448 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1449 init_data.flags.multi_mon_pp_mclk_switch = true;
1451 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1452 init_data.flags.disable_fractional_pwm = true;
1454 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1455 init_data.flags.edp_no_power_sequencing = true;
1457 init_data.flags.power_down_display_on_boot = true;
1459 INIT_LIST_HEAD(&adev->dm.da_list);
1460 /* Display Core create. */
1461 adev->dm.dc = dc_create(&init_data);
1464 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1466 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
/* Debug mask overrides applied after DC exists. */
1470 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1471 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1472 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1475 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1476 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1477 if (dm_should_disable_stutter(adev->pdev))
1478 adev->dm.dc->debug.disable_stutter = true;
1480 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1481 adev->dm.dc->debug.disable_stutter = true;
1483 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1484 adev->dm.dc->debug.disable_dsc = true;
1486 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1487 adev->dm.dc->debug.disable_clock_gate = true;
/* DMUB HW must be up before dc_hardware_init(). */
1489 r = dm_dmub_hw_init(adev);
1491 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1495 dc_hardware_init(adev->dm.dc);
1497 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1498 if (!adev->dm.hpd_rx_offload_wq) {
1499 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1503 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* APUs program the MMHUB system aperture/GART context into DC. */
1504 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1505 struct dc_phy_addr_space_config pa_config;
1507 mmhub_read_system_context(adev, &pa_config);
1509 // Call the DC init_memory func
1510 dc_setup_system_context(adev->dm.dc, &pa_config);
1514 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1515 if (!adev->dm.freesync_module) {
1517 "amdgpu: failed to initialize freesync_module.\n");
1519 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1520 adev->dm.freesync_module);
1522 amdgpu_dm_init_color_mod();
1524 #if defined(CONFIG_DRM_AMD_DC_DCN)
1525 if (adev->dm.dc->caps.max_links > 0) {
1526 adev->dm.vblank_control_workqueue =
1527 create_singlethread_workqueue("dm_vblank_control_workqueue");
1528 if (!adev->dm.vblank_control_workqueue)
1529 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1533 #ifdef CONFIG_DRM_AMD_DC_HDCP
1534 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1535 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1537 if (!adev->dm.hdcp_workqueue)
1538 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1540 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1542 dc_init_callbacks(adev->dm.dc, &init_params);
1545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1546 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
/* DMUB notification path: outbox + AUX/HPD callback registration. */
1548 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1549 init_completion(&adev->dm.dmub_aux_transfer_done);
1550 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1551 if (!adev->dm.dmub_notify) {
1552 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1556 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1557 if (!adev->dm.delayed_hpd_wq) {
1558 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1562 amdgpu_dm_outbox_init(adev);
1563 #if defined(CONFIG_DRM_AMD_DC_DCN)
1564 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1565 dmub_aux_setconfig_callback, false)) {
1566 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1569 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1570 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1573 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1574 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1577 #endif /* CONFIG_DRM_AMD_DC_DCN */
1580 if (amdgpu_dm_initialize_drm_device(adev)) {
1582 "amdgpu: failed to initialize sw for display support.\n");
1586 /* create fake encoders for MST */
1587 dm_dp_create_fake_mst_encoders(adev);
1589 /* TODO: Add_display_info? */
1591 /* TODO use dynamic cursor width */
1592 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1593 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1595 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1597 "amdgpu: failed to initialize sw for display support.\n");
1602 DRM_DEBUG_DRIVER("KMS initialized.\n");
/* Error path: unwind everything created above. */
1606 amdgpu_dm_fini(adev);
/* Early teardown hook: only audio needs to go before the main fini. */
1611 static int amdgpu_dm_early_fini(void *handle)
1613 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1615 amdgpu_dm_audio_fini(adev);
/*
 * amdgpu_dm_fini() - Tear down the display manager.
 *
 * Destroys everything amdgpu_dm_init() created, roughly in reverse
 * order: workqueues, DRM device state, HDCP/secure-display helpers,
 * DMUB resources, DC itself, CGS device, freesync, and finally the
 * locks.  Every pointer is NULLed after release so this is safe to
 * call from amdgpu_dm_init()'s error path with partial state.
 */
1620 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1624 #if defined(CONFIG_DRM_AMD_DC_DCN)
1625 if (adev->dm.vblank_control_workqueue) {
1626 destroy_workqueue(adev->dm.vblank_control_workqueue);
1627 adev->dm.vblank_control_workqueue = NULL;
1631 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1632 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1635 amdgpu_dm_destroy_drm_device(&adev->dm);
1637 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1638 if (adev->dm.crc_rd_wrk) {
/* Let any in-flight secure-display TA work finish before freeing. */
1639 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1640 kfree(adev->dm.crc_rd_wrk);
1641 adev->dm.crc_rd_wrk = NULL;
1644 #ifdef CONFIG_DRM_AMD_DC_HDCP
1645 if (adev->dm.hdcp_workqueue) {
1646 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1647 adev->dm.hdcp_workqueue = NULL;
1651 dc_deinit_callbacks(adev->dm.dc);
1654 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1656 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1657 kfree(adev->dm.dmub_notify);
1658 adev->dm.dmub_notify = NULL;
1659 destroy_workqueue(adev->dm.delayed_hpd_wq);
1660 adev->dm.delayed_hpd_wq = NULL;
1663 if (adev->dm.dmub_bo)
1664 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1665 &adev->dm.dmub_bo_gpu_addr,
1666 &adev->dm.dmub_bo_cpu_addr);
1668 if (adev->dm.hpd_rx_offload_wq) {
1669 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1670 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1671 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1672 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1676 kfree(adev->dm.hpd_rx_offload_wq);
1677 adev->dm.hpd_rx_offload_wq = NULL;
1680 /* DC Destroy TODO: Replace destroy DAL */
1682 dc_destroy(&adev->dm.dc);
1684 * TODO: pageflip, vlank interrupt
1686 * amdgpu_dm_irq_fini(adev);
1689 if (adev->dm.cgs_device) {
1690 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1691 adev->dm.cgs_device = NULL;
1693 if (adev->dm.freesync_module) {
1694 mod_freesync_destroy(adev->dm.freesync_module);
1695 adev->dm.freesync_module = NULL;
1698 mutex_destroy(&adev->dm.audio_lock);
1699 mutex_destroy(&adev->dm.dc_lock);
/*
 * load_dmcu_fw() - Select, fetch and register DMCU firmware.
 *
 * Picks a firmware name per ASIC / DCE IP version (many ASICs need
 * none), requests and validates it, then registers its ERAM and INTV
 * sections for PSP loading.  Missing firmware is not an error; DMCU is
 * optional and fw_dmcu stays NULL.
 */
1704 static int load_dmcu_fw(struct amdgpu_device *adev)
1706 const char *fw_name_dmcu = NULL;
1708 const struct dmcu_firmware_header_v1_0 *hdr;
1710 switch(adev->asic_type) {
1711 #if defined(CONFIG_DRM_AMD_DC_SI)
1726 case CHIP_POLARIS11:
1727 case CHIP_POLARIS10:
1728 case CHIP_POLARIS12:
1735 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1738 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1739 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1740 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1741 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1746 switch (adev->ip_versions[DCE_HWIP][0]) {
1747 case IP_VERSION(2, 0, 2):
1748 case IP_VERSION(2, 0, 3):
1749 case IP_VERSION(2, 0, 0):
1750 case IP_VERSION(2, 1, 0):
1751 case IP_VERSION(3, 0, 0):
1752 case IP_VERSION(3, 0, 2):
1753 case IP_VERSION(3, 0, 3):
1754 case IP_VERSION(3, 0, 1):
1755 case IP_VERSION(3, 1, 2):
1756 case IP_VERSION(3, 1, 3):
1761 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
/* DMCU is only loaded through PSP; other load types skip it. */
1765 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1766 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1770 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1772 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1773 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1774 adev->dm.fw_dmcu = NULL;
1778 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1783 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1785 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1787 release_firmware(adev->dm.fw_dmcu);
1788 adev->dm.fw_dmcu = NULL;
/* Register ERAM (code minus interrupt vectors) and INTV for PSP. */
1792 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1793 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1794 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1795 adev->firmware.fw_size +=
1796 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1798 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1799 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1800 adev->firmware.fw_size +=
1801 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1803 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1805 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
/* DMUB service register-read callback; routes through DC's accessors. */
1810 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1812 struct amdgpu_device *adev = ctx;
1814 return dm_read_reg(adev->dm.dc->ctx, address);
/* DMUB service register-write callback; routes through DC's accessors. */
1817 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1820 struct amdgpu_device *adev = ctx;
1822 return dm_write_reg(adev->dm.dc->ctx, address, value);
/*
 * dm_dmub_sw_init() - Software-side DMUB setup.
 *
 * Selects the DMUB firmware per DCE IP version, requests and validates
 * it, registers it for PSP loading when applicable, creates the
 * dmub_srv instance with register-access callbacks, computes region
 * sizes from the firmware header, allocates the backing VRAM buffer,
 * and fills adev->dm.dmub_fb_info with the rebased region addresses.
 */
1825 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1827 struct dmub_srv_create_params create_params;
1828 struct dmub_srv_region_params region_params;
1829 struct dmub_srv_region_info region_info;
1830 struct dmub_srv_fb_params fb_params;
1831 struct dmub_srv_fb_info *fb_info;
1832 struct dmub_srv *dmub_srv;
1833 const struct dmcub_firmware_header_v1_0 *hdr;
1834 const char *fw_name_dmub;
1835 enum dmub_asic dmub_asic;
1836 enum dmub_status status;
/* Map DCE IP version to DMUB ASIC enum and firmware file. */
1839 switch (adev->ip_versions[DCE_HWIP][0]) {
1840 case IP_VERSION(2, 1, 0):
1841 dmub_asic = DMUB_ASIC_DCN21;
1842 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1843 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1844 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1846 case IP_VERSION(3, 0, 0):
1847 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1848 dmub_asic = DMUB_ASIC_DCN30;
1849 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1851 dmub_asic = DMUB_ASIC_DCN30;
1852 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1855 case IP_VERSION(3, 0, 1):
1856 dmub_asic = DMUB_ASIC_DCN301;
1857 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1859 case IP_VERSION(3, 0, 2):
1860 dmub_asic = DMUB_ASIC_DCN302;
1861 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1863 case IP_VERSION(3, 0, 3):
1864 dmub_asic = DMUB_ASIC_DCN303;
1865 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1867 case IP_VERSION(3, 1, 2):
1868 case IP_VERSION(3, 1, 3):
1869 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1870 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1874 /* ASIC doesn't support DMUB. */
1878 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1880 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1884 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1886 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1890 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1891 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1893 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1894 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1895 AMDGPU_UCODE_ID_DMCUB;
1896 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1898 adev->firmware.fw_size +=
1899 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1901 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1902 adev->dm.dmcub_fw_version);
1906 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1907 dmub_srv = adev->dm.dmub_srv;
1910 DRM_ERROR("Failed to allocate DMUB service!\n");
1914 memset(&create_params, 0, sizeof(create_params));
1915 create_params.user_ctx = adev;
1916 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1917 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1918 create_params.asic = dmub_asic;
1920 /* Create the DMUB service. */
1921 status = dmub_srv_create(dmub_srv, &create_params);
1922 if (status != DMUB_STATUS_OK) {
1923 DRM_ERROR("Error creating DMUB service: %d\n", status);
1927 /* Calculate the size of all the regions for the DMUB service. */
1928 memset(&region_params, 0, sizeof(region_params));
/* inst_const excludes the PSP header/footer wrapping the image. */
1930 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1931 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1932 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1933 region_params.vbios_size = adev->bios_size;
1934 region_params.fw_bss_data = region_params.bss_data_size ?
1935 adev->dm.dmub_fw->data +
1936 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1937 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1938 region_params.fw_inst_const =
1939 adev->dm.dmub_fw->data +
1940 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1943 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1946 if (status != DMUB_STATUS_OK) {
1947 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1952 * Allocate a framebuffer based on the total size of all the regions.
1953 * TODO: Move this into GART.
1955 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1956 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1957 &adev->dm.dmub_bo_gpu_addr,
1958 &adev->dm.dmub_bo_cpu_addr);
1962 /* Rebase the regions on the framebuffer address. */
1963 memset(&fb_params, 0, sizeof(fb_params));
1964 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1965 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1966 fb_params.region_info = &region_info;
1968 adev->dm.dmub_fb_info =
1969 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1970 fb_info = adev->dm.dmub_fb_info;
1974 "Failed to allocate framebuffer info for DMUB service!\n");
1978 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1979 if (status != DMUB_STATUS_OK) {
1980 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
/* IP-block sw_init hook: DMUB software setup, then DMCU firmware load. */
1987 static int dm_sw_init(void *handle)
1989 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1992 r = dm_dmub_sw_init(adev);
1996 return load_dmcu_fw(adev);
/* IP-block sw_fini hook: release DMUB fb info/service and firmwares. */
1999 static int dm_sw_fini(void *handle)
2001 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2003 kfree(adev->dm.dmub_fb_info);
2004 adev->dm.dmub_fb_info = NULL;
2006 if (adev->dm.dmub_srv) {
2007 dmub_srv_destroy(adev->dm.dmub_srv);
2008 adev->dm.dmub_srv = NULL;
/* release_firmware() tolerates NULL, so no guards needed here. */
2011 release_firmware(adev->dm.dmub_fw);
2012 adev->dm.dmub_fw = NULL;
2014 release_firmware(adev->dm.fw_dmcu);
2015 adev->dm.fw_dmcu = NULL;
/*
 * detect_mst_link_for_all_connectors() - Start MST topology management.
 *
 * Walks every connector; for links detected as MST branches with a
 * valid AUX channel, enables the topology manager.  On failure the
 * link is downgraded to a single (SST) connection.
 */
2020 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2022 struct amdgpu_dm_connector *aconnector;
2023 struct drm_connector *connector;
2024 struct drm_connector_list_iter iter;
2027 drm_connector_list_iter_begin(dev, &iter);
2028 drm_for_each_connector_iter(connector, &iter) {
2029 aconnector = to_amdgpu_dm_connector(connector);
2030 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2031 aconnector->mst_mgr.aux) {
2032 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2034 aconnector->base.base.id);
2036 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2038 DRM_ERROR("DM_MST: Failed to start MST\n");
/* Fall back to treating the link as a plain SST connection. */
2039 aconnector->dc_link->type =
2040 dc_connection_single;
2045 drm_connector_list_iter_end(&iter);
/*
 * dm_late_init() - Late IP-block init: ABM/backlight IRAM and MST.
 *
 * Builds backlight ramping parameters with a linear 16-entry LUT and
 * loads them either into the DMCU IRAM or, when ABM runs on DMCUB,
 * into each eDP link's ABM config.  Finishes by kicking off MST
 * detection for all connectors.
 */
2050 static int dm_late_init(void *handle)
2052 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2054 struct dmcu_iram_parameters params;
2055 unsigned int linear_lut[16];
2057 struct dmcu *dmcu = NULL;
2059 dmcu = adev->dm.dc->res_pool->dmcu;
/* Identity backlight transfer curve: 0..0xFFFF in 16 steps. */
2061 for (i = 0; i < 16; i++)
2062 linear_lut[i] = 0xFFFF * i / 15;
2065 params.backlight_ramping_override = false;
2066 params.backlight_ramping_start = 0xCCCC;
2067 params.backlight_ramping_reduction = 0xCCCCCCCC;
2068 params.backlight_lut_array_size = 16;
2069 params.backlight_lut_array = linear_lut;
2071 /* Min backlight level after ABM reduction, Don't allow below 1%
2072 * 0xFFFF x 0.01 = 0x28F
2074 params.min_abm_backlight = 0x28F;
2075 /* In the case where abm is implemented on dmcub,
2076 * dmcu object will be null.
2077 * ABM 2.4 and up are implemented on dmcub.
2080 if (!dmcu_load_iram(dmcu, params))
2082 } else if (adev->dm.dc->ctx->dmub_srv) {
2083 struct dc_link *edp_links[MAX_NUM_EDP];
2086 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2087 for (i = 0; i < edp_num; i++) {
2088 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2093 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
/*
 * s3_handle_mst() - Suspend or resume all MST topology managers.
 *
 * Skips non-MST connectors and MST ports (only root branch devices own
 * a manager).  If a resume fails, the manager is switched back to
 * non-MST mode and a hotplug event is fired so userspace re-probes.
 */
2096 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2098 struct amdgpu_dm_connector *aconnector;
2099 struct drm_connector *connector;
2100 struct drm_connector_list_iter iter;
2101 struct drm_dp_mst_topology_mgr *mgr;
2103 bool need_hotplug = false;
2105 drm_connector_list_iter_begin(dev, &iter);
2106 drm_for_each_connector_iter(connector, &iter) {
2107 aconnector = to_amdgpu_dm_connector(connector);
2108 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2109 aconnector->mst_port)
2112 mgr = &aconnector->mst_mgr;
2115 drm_dp_mst_topology_mgr_suspend(mgr);
2117 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2119 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2120 need_hotplug = true;
2124 drm_connector_list_iter_end(&iter);
2127 drm_kms_helper_hotplug_event(dev);
/*
 * amdgpu_dm_smu_write_watermarks_table() - Push DCN watermarks to SMU.
 *
 * No-op on hardware without SW SMU or outside the DCE IP versions
 * listed in the switch below (see the Navi1x/Renoir discussion in the
 * comment inside).
 */
2130 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2132 struct smu_context *smu = &adev->smu;
2135 if (!is_support_sw_smu(adev))
2138 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2139 * on window driver dc implementation.
2140 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2141 * should be passed to smu during boot up and resume from s3.
2142 * boot up: dc calculate dcn watermark clock settings within dc_create,
2143 * dcn20_resource_construct
2144 * then call pplib functions below to pass the settings to smu:
2145 * smu_set_watermarks_for_clock_ranges
2146 * smu_set_watermarks_table
2147 * navi10_set_watermarks_table
2148 * smu_write_watermarks_table
2150 * For Renoir, clock settings of dcn watermark are also fixed values.
2151 * dc has implemented different flow for window driver:
2152 * dc_hardware_init / dc_set_power_state
2157 * smu_set_watermarks_for_clock_ranges
2158 * renoir_set_watermarks_table
2159 * smu_write_watermarks_table
2162 * dc_hardware_init -> amdgpu_dm_init
2163 * dc_set_power_state --> dm_resume
2165 * therefore, this function apply to navi10/12/14 but not Renoir
2168 switch (adev->ip_versions[DCE_HWIP][0]) {
2169 case IP_VERSION(2, 0, 2):
2170 case IP_VERSION(2, 0, 0):
2176 ret = smu_write_watermarks_table(smu);
2178 DRM_ERROR("Failed to update WMTABLE!\n");
2186 * dm_hw_init() - Initialize DC device
2187 * @handle: The base driver device containing the amdgpu_dm device.
2189 * Initialize the &struct amdgpu_display_manager device. This involves calling
2190 * the initializers of each DM component, then populating the struct with them.
2192 * Although the function implies hardware initialization, both hardware and
2193 * software are initialized here. Splitting them out to their relevant init
2194 * hooks is a future TODO item.
2196 * Some notable things that are initialized here:
2198 * - Display Core, both software and hardware
2199 * - DC modules that we need (freesync and color management)
2200 * - DRM software states
2201 * - Interrupt sources and handlers
2203 * - Debug FS entries, if enabled
2205 static int dm_hw_init(void *handle)
2207 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2208 /* Create DAL display manager */
2209 amdgpu_dm_init(adev);
/* Bring up hotplug-detect handling once the DM exists. */
2210 amdgpu_dm_hpd_init(adev);
2216 * dm_hw_fini() - Teardown DC device
2217 * @handle: The base driver device containing the amdgpu_dm device.
2219 * Teardown components within &struct amdgpu_display_manager that require
2220 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2221 * were loaded. Also flush IRQ workqueues and disable them.
2223 static int dm_hw_fini(void *handle)
2225 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* Reverse order of dm_hw_init(): HPD first, then IRQs, then the DM. */
2227 amdgpu_dm_hpd_fini(adev);
2229 amdgpu_dm_irq_fini(adev);
2230 amdgpu_dm_fini(adev);
2235 static int dm_enable_vblank(struct drm_crtc *crtc);
2236 static void dm_disable_vblank(struct drm_crtc *crtc);
/*
 * dm_gpureset_toggle_interrupts() - Mass-toggle pflip/vblank IRQs.
 *
 * For every stream in @state that has planes, enables or disables the
 * CRTC's pageflip interrupt and its vblank interrupt.  Used around GPU
 * reset to quiesce and later restore display interrupts.
 */
2238 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2239 struct dc_state *state, bool enable)
2241 enum dc_irq_source irq_source;
2242 struct amdgpu_crtc *acrtc;
2246 for (i = 0; i < state->stream_count; i++) {
2247 acrtc = get_crtc_by_otg_inst(
2248 adev, state->stream_status[i].primary_otg_inst);
/* Only streams with active planes have interrupts worth toggling. */
2250 if (acrtc && state->stream_status[i].plane_count != 0) {
2251 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2252 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2253 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2254 acrtc->crtc_id, enable ? "en" : "dis", rc);
2256 DRM_WARN("Failed to %s pflip interrupts\n",
2257 enable ? "enable" : "disable");
2260 rc = dm_enable_vblank(&acrtc->base);
2262 DRM_WARN("Failed to enable vblank interrupts\n");
2264 dm_disable_vblank(&acrtc->base);
/*
 * amdgpu_dm_commit_zero_streams() - Commit an empty display state.
 *
 * Copies the current DC state, strips every stream (and its planes)
 * from the copy, validates, and commits it — blanking all displays.
 * Used on suspend/reset paths.  Returns a dc_status code.
 */
2272 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2274 struct dc_state *context = NULL;
2275 enum dc_status res = DC_ERROR_UNEXPECTED;
2277 struct dc_stream_state *del_streams[MAX_PIPES];
2278 int del_streams_count = 0;
2280 memset(del_streams, 0, sizeof(del_streams));
2282 context = dc_create_state(dc);
2283 if (context == NULL)
2284 goto context_alloc_fail;
2286 dc_resource_state_copy_construct_current(dc, context);
2288 /* First remove from context all streams */
2289 for (i = 0; i < context->stream_count; i++) {
2290 struct dc_stream_state *stream = context->streams[i];
2292 del_streams[del_streams_count++] = stream;
2295 /* Remove all planes for removed streams and then remove the streams */
2296 for (i = 0; i < del_streams_count; i++) {
2297 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2298 res = DC_FAIL_DETACH_SURFACES;
2302 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2308 res = dc_validate_global_state(dc, context, false);
2311 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2315 res = dc_commit_state(dc, context);
/* Drop our reference; DC retains the committed state internally. */
2318 dc_release_state(context);
2324 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2328 if (dm->hpd_rx_offload_wq) {
2329 for (i = 0; i < dm->dc->caps.max_links; i++)
2330 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
/*
 * dm_suspend() - IP-block suspend hook.
 *
 * On a GPU reset, caches the current DC state and commits zero streams so
 * the hardware is idle; on normal suspend, saves the atomic state via the
 * DRM helpers and powers DC down to D3. Both paths quiesce DM IRQs and
 * flush the HPD RX offload work first.
 */
2334 static int dm_suspend(void *handle)
2336 struct amdgpu_device *adev = handle;
2337 struct amdgpu_display_manager *dm = &adev->dm;
2340 if (amdgpu_in_reset(adev)) {
/* dc_lock is held across the reset; the matching unlock is in dm_resume(). */
2341 mutex_lock(&dm->dc_lock);
2343 #if defined(CONFIG_DRM_AMD_DC_DCN)
2344 dc_allow_idle_optimizations(adev->dm.dc, false);
/* Keep a copy of the live DC state so dm_resume() can replay it. */
2347 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2349 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2351 amdgpu_dm_commit_zero_streams(dm->dc);
2353 amdgpu_dm_irq_suspend(adev);
2355 hpd_rx_irq_work_suspend(dm);
/* Normal (non-reset) suspend path from here on. */
2360 WARN_ON(adev->dm.cached_state);
2361 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2363 s3_handle_mst(adev_to_drm(adev), true);
2365 amdgpu_dm_irq_suspend(adev);
2367 hpd_rx_irq_work_suspend(dm);
2369 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2374 static struct amdgpu_dm_connector *
2375 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2376 struct drm_crtc *crtc)
2379 struct drm_connector_state *new_con_state;
2380 struct drm_connector *connector;
2381 struct drm_crtc *crtc_from_state;
2383 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2384 crtc_from_state = new_con_state->crtc;
2386 if (crtc_from_state == crtc)
2387 return to_amdgpu_dm_connector(connector);
/*
 * emulated_link_detect() - Fabricate a sink for a link with no physical
 * detection (e.g. a user-forced connector). Drops any previous local sink,
 * builds sink capabilities from the connector signal type, creates a new
 * dc_sink and attempts an EDID read. NOTE(review): the break statements and
 * the closing structure of the switch are not visible in this hunk.
 */
2393 static void emulated_link_detect(struct dc_link *link)
2395 struct dc_sink_init_data sink_init_data = { 0 };
2396 struct display_sink_capability sink_caps = { 0 };
2397 enum dc_edid_status edid_status;
2398 struct dc_context *dc_ctx = link->ctx;
2399 struct dc_sink *sink = NULL;
2400 struct dc_sink *prev_sink = NULL;
2402 link->type = dc_connection_none;
2403 prev_sink = link->local_sink;
/* Release the reference the link held on its previous sink. */
2406 dc_sink_release(prev_sink);
2408 switch (link->connector_signal) {
2409 case SIGNAL_TYPE_HDMI_TYPE_A: {
2410 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2411 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2415 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2416 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2417 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2421 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2422 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2423 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2427 case SIGNAL_TYPE_LVDS: {
2428 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2429 sink_caps.signal = SIGNAL_TYPE_LVDS;
2433 case SIGNAL_TYPE_EDP: {
2434 sink_caps.transaction_type =
2435 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2436 sink_caps.signal = SIGNAL_TYPE_EDP;
2440 case SIGNAL_TYPE_DISPLAY_PORT: {
2441 sink_caps.transaction_type =
2442 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
/* DP links are emulated with a virtual signal type, not real DP. */
2443 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2448 DC_ERROR("Invalid connector type! signal:%d\n",
2449 link->connector_signal);
2453 sink_init_data.link = link;
2454 sink_init_data.sink_signal = sink_caps.signal;
2456 sink = dc_sink_create(&sink_init_data);
2458 DC_ERROR("Failed to create sink!\n");
2462 /* dc_sink_create returns a new reference */
2463 link->local_sink = sink;
2465 edid_status = dm_helpers_read_local_edid(
2470 if (edid_status != EDID_OK)
2471 DC_ERROR("Failed to read EDID");
/*
 * dm_gpureset_commit_state() - Replay a cached DC state after GPU reset.
 *
 * For every stream in @dc_state, rebuilds a surface-update bundle marking
 * each plane for a full update and commits it through
 * dc_commit_updates_for_stream(). NOTE(review): the bundle struct
 * definition, loop-counter declarations and the kfree/cleanup tail are not
 * visible in this hunk.
 */
2475 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2476 struct amdgpu_display_manager *dm)
2479 struct dc_surface_update surface_updates[MAX_SURFACES];
2480 struct dc_plane_info plane_infos[MAX_SURFACES];
2481 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2482 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2483 struct dc_stream_update stream_update;
2487 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2490 dm_error("Failed to allocate update bundle\n");
2494 for (k = 0; k < dc_state->stream_count; k++) {
2495 bundle->stream_update.stream = dc_state->streams[k];
/*
 * NOTE(review): stream_status is dereferenced without a [k] index here
 * and below, so every iteration reads stream 0's status/plane list —
 * verify against upstream whether this should be stream_status[k].
 */
2497 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2498 bundle->surface_updates[m].surface =
2499 dc_state->stream_status->plane_states[m];
2500 bundle->surface_updates[m].surface->force_full_update =
2503 dc_commit_updates_for_stream(
2504 dm->dc, bundle->surface_updates,
2505 dc_state->stream_status->plane_count,
2506 dc_state->streams[k], &bundle->stream_update, dc_state);
/*
 * dm_set_dpms_off() - Force the stream on @link to DPMS off.
 *
 * Looks up the stream currently driving @link and commits a stream update
 * with dpms_off set, under the DM dc_lock. Also latches force_dpms_off in
 * @acrtc_state so later commits know the stream was forced off.
 */
2515 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2517 struct dc_stream_state *stream_state;
2518 struct amdgpu_dm_connector *aconnector = link->priv;
2519 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2520 struct dc_stream_update stream_update;
2521 bool dpms_off = true;
2523 memset(&stream_update, 0, sizeof(stream_update));
2524 stream_update.dpms_off = &dpms_off;
2526 mutex_lock(&adev->dm.dc_lock);
2527 stream_state = dc_stream_find_from_link(link);
/* No active stream on this link: nothing to turn off. */
2529 if (stream_state == NULL) {
2530 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2531 mutex_unlock(&adev->dm.dc_lock);
2535 stream_update.stream = stream_state;
2536 acrtc_state->force_dpms_off = true;
2537 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2538 stream_state, &stream_update,
2539 stream_state->ctx->dc->current_state);
2540 mutex_unlock(&adev->dm.dc_lock);
/*
 * dm_resume() - IP-block resume hook; mirror of dm_suspend().
 *
 * GPU-reset path: re-initializes DMUB, replays the DC state cached by
 * dm_suspend(), re-enables interrupts and releases the cached state.
 * Normal path: recreates the private dc_state, powers DC up to D0,
 * re-detects every non-MST connector and restores the cached atomic state.
 * NOTE(review): several declarations (r, i, j) and early returns are not
 * visible in this hunk.
 */
2543 static int dm_resume(void *handle)
2545 struct amdgpu_device *adev = handle;
2546 struct drm_device *ddev = adev_to_drm(adev);
2547 struct amdgpu_display_manager *dm = &adev->dm;
2548 struct amdgpu_dm_connector *aconnector;
2549 struct drm_connector *connector;
2550 struct drm_connector_list_iter iter;
2551 struct drm_crtc *crtc;
2552 struct drm_crtc_state *new_crtc_state;
2553 struct dm_crtc_state *dm_new_crtc_state;
2554 struct drm_plane *plane;
2555 struct drm_plane_state *new_plane_state;
2556 struct dm_plane_state *dm_new_plane_state;
2557 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2558 enum dc_connection_type new_connection_type = dc_connection_none;
2559 struct dc_state *dc_state;
2562 if (amdgpu_in_reset(adev)) {
2563 dc_state = dm->cached_dc_state;
2566 * The dc->current_state is backed up into dm->cached_dc_state
2567 * before we commit 0 streams.
2569 * DC will clear link encoder assignments on the real state
2570 * but the changes won't propagate over to the copy we made
2571 * before the 0 streams commit.
2573 * DC expects that link encoder assignments are *not* valid
2574 * when committing a state, so as a workaround it needs to be
2577 link_enc_cfg_init(dm->dc, dc_state);
2579 amdgpu_dm_outbox_init(adev);
2581 r = dm_dmub_hw_init(adev);
2583 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2585 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2588 amdgpu_dm_irq_resume_early(adev);
/* Force every cached stream/plane to be fully reprogrammed on commit. */
2590 for (i = 0; i < dc_state->stream_count; i++) {
2591 dc_state->streams[i]->mode_changed = true;
2592 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2593 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2597 #if defined(CONFIG_DRM_AMD_DC_DCN)
2599 * Resource allocation happens for link encoders for newer ASIC in
2600 * dc_validate_global_state, so we need to revalidate it.
2602 * This shouldn't fail (it passed once before), so warn if it does.
2604 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2607 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2609 dm_gpureset_commit_state(dm->cached_dc_state, dm);
/* Interrupts were disabled by dm_suspend(); turn them back on. */
2611 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2613 dc_release_state(dm->cached_dc_state);
2614 dm->cached_dc_state = NULL;
2616 amdgpu_dm_irq_resume_late(adev);
/* Matches the mutex_lock() taken in dm_suspend() on the reset path. */
2618 mutex_unlock(&dm->dc_lock);
2622 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2623 dc_release_state(dm_state->context);
2624 dm_state->context = dc_create_state(dm->dc);
2625 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2626 dc_resource_state_construct(dm->dc, dm_state->context);
2628 /* Before powering on DC we need to re-initialize DMUB. */
2629 r = dm_dmub_hw_init(adev);
2631 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2633 /* power on hardware */
2634 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2636 /* program HPD filter */
2640 * early enable HPD Rx IRQ, should be done before set mode as short
2641 * pulse interrupts are used for MST
2643 amdgpu_dm_irq_resume_early(adev);
2645 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2646 s3_handle_mst(ddev, false);
/* Re-detect every connector; MST ports are handled by the MST manager. */
2649 drm_connector_list_iter_begin(ddev, &iter);
2650 drm_for_each_connector_iter(connector, &iter) {
2651 aconnector = to_amdgpu_dm_connector(connector);
2654 * this is the case when traversing through already created
2655 * MST connectors, should be skipped
2657 if (aconnector->mst_port)
2660 mutex_lock(&aconnector->hpd_lock);
2661 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2662 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connectors with nothing attached get an emulated sink. */
2664 if (aconnector->base.force && new_connection_type == dc_connection_none)
2665 emulated_link_detect(aconnector->dc_link);
2667 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2669 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2670 aconnector->fake_enable = false;
2672 if (aconnector->dc_sink)
2673 dc_sink_release(aconnector->dc_sink);
2674 aconnector->dc_sink = NULL;
2675 amdgpu_dm_update_connector_after_detect(aconnector);
2676 mutex_unlock(&aconnector->hpd_lock);
2678 drm_connector_list_iter_end(&iter);
2680 /* Force mode set in atomic commit */
2681 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2682 new_crtc_state->active_changed = true;
2685 * atomic_check is expected to create the dc states. We need to release
2686 * them here, since they were duplicated as part of the suspend
2689 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2690 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2691 if (dm_new_crtc_state->stream) {
2692 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2693 dc_stream_release(dm_new_crtc_state->stream);
2694 dm_new_crtc_state->stream = NULL;
2698 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2699 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2700 if (dm_new_plane_state->dc_state) {
2701 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2702 dc_plane_state_release(dm_new_plane_state->dc_state);
2703 dm_new_plane_state->dc_state = NULL;
2707 drm_atomic_helper_resume(ddev, dm->cached_state);
2709 dm->cached_state = NULL;
2711 amdgpu_dm_irq_resume_late(adev);
2713 amdgpu_dm_smu_write_watermarks_table(adev);
2721 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2722 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2723 * the base driver's device list to be initialized and torn down accordingly.
2725 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
/* IP-block callback table wiring the dm_* lifecycle hooks into amdgpu. */
2728 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2730 .early_init = dm_early_init,
2731 .late_init = dm_late_init,
2732 .sw_init = dm_sw_init,
2733 .sw_fini = dm_sw_fini,
2734 .early_fini = amdgpu_dm_early_fini,
2735 .hw_init = dm_hw_init,
2736 .hw_fini = dm_hw_fini,
2737 .suspend = dm_suspend,
2738 .resume = dm_resume,
2739 .is_idle = dm_is_idle,
2740 .wait_for_idle = dm_wait_for_idle,
2741 .check_soft_reset = dm_check_soft_reset,
2742 .soft_reset = dm_soft_reset,
2743 .set_clockgating_state = dm_set_clockgating_state,
2744 .set_powergating_state = dm_set_powergating_state,
/* IP-block descriptor exported to the base driver; registered as a DCE block. */
2747 const struct amdgpu_ip_block_version dm_ip_block =
2749 .type = AMD_IP_BLOCK_TYPE_DCE,
2753 .funcs = &amdgpu_dm_funcs,
/* Mode-config entry points: FB creation plus the DM atomic check/commit. */
2763 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2764 .fb_create = amdgpu_display_user_framebuffer_create,
2765 .get_format_info = amd_get_format_info,
2766 .output_poll_changed = drm_fb_helper_output_poll_changed,
2767 .atomic_check = amdgpu_dm_atomic_check,
2768 .atomic_commit = drm_atomic_helper_commit,
/* Use DM's own commit_tail so DC programs the hardware during atomic commits. */
2771 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2772 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
/*
 * update_connector_ext_caps() - Refresh eDP backlight capabilities.
 *
 * For an eDP connector, locates the matching backlight slot, caches the
 * sink's extended DPCD caps, decides between AUX and PWM backlight control
 * (overridable via the amdgpu_backlight module parameter) and derives the
 * min/max input signal from the HDR metadata luminance values. Non-eDP
 * connectors are ignored.
 */
2775 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2777 u32 max_cll, min_cll, max, min, q, r;
2778 struct amdgpu_dm_backlight_caps *caps;
2779 struct amdgpu_display_manager *dm;
2780 struct drm_connector *conn_base;
2781 struct amdgpu_device *adev;
2782 struct dc_link *link = NULL;
2783 static const u8 pre_computed_values[] = {
2784 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2785 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2788 if (!aconnector || !aconnector->dc_link)
/* Backlight caps only apply to embedded DisplayPort panels. */
2791 link = aconnector->dc_link;
2792 if (link->connector_signal != SIGNAL_TYPE_EDP)
2795 conn_base = &aconnector->base;
2796 adev = drm_to_adev(conn_base->dev);
/* NOTE(review): the assignment of dm is not visible in this hunk. */
2798 for (i = 0; i < dm->num_of_edps; i++) {
2799 if (link == dm->backlight_link[i])
/* Link not registered as a backlight eDP: nothing to update. */
2802 if (i >= dm->num_of_edps)
2804 caps = &dm->backlight_caps[i];
2805 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2806 caps->aux_support = false;
2807 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2808 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2810 if (caps->ext_caps->bits.oled == 1 /*||
2811 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2812 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2813 caps->aux_support = true;
/* amdgpu_backlight module param: 0 forces PWM, 1 forces AUX control. */
2815 if (amdgpu_backlight == 0)
2816 caps->aux_support = false;
2817 else if (amdgpu_backlight == 1)
2818 caps->aux_support = true;
2820 /* From the specification (CTA-861-G), for calculating the maximum
2821 * luminance we need to use:
2822 * Luminance = 50*2**(CV/32)
2823 * Where CV is a one-byte value.
2824 * For calculating this expression we may need float point precision;
2825 * to avoid this complexity level, we take advantage that CV is divided
2826 * by a constant. From the Euclids division algorithm, we know that CV
2827 * can be written as: CV = 32*q + r. Next, we replace CV in the
2828 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2829 * need to pre-compute the value of r/32. For pre-computing the values
2830 * We just used the following Ruby line:
2831 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2832 * The results of the above expressions can be verified at
2833 * pre_computed_values.
/* NOTE(review): the q = max_cll / 32; r = max_cll % 32; lines are not
 * visible in this hunk but are required for the expression below. */
2837 max = (1 << q) * pre_computed_values[r];
2839 // min luminance: maxLum * (CV/255)^2 / 100
2840 q = DIV_ROUND_CLOSEST(min_cll, 255);
/* NOTE(review): DIV_ROUND_CLOSEST(q*q, 100) truncates heavily for small q
 * — verify against upstream whether this integer form is intended. */
2841 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2843 caps->aux_max_input_signal = max;
2844 caps->aux_min_input_signal = min;
/*
 * amdgpu_dm_update_connector_after_detect() - Sync DRM connector state with
 * the result of a DC link detection.
 *
 * Adopts the link's local sink (or the emulated sink for forced headless
 * connectors), updates the EDID property, CEC address, freesync caps and
 * backlight ext caps, and clears everything on disconnect. MST connectors
 * are skipped; the MST framework owns their state.
 */
2847 void amdgpu_dm_update_connector_after_detect(
2848 struct amdgpu_dm_connector *aconnector)
2850 struct drm_connector *connector = &aconnector->base;
2851 struct drm_device *dev = connector->dev;
2852 struct dc_sink *sink;
2854 /* MST handled by drm_mst framework */
2855 if (aconnector->mst_mgr.mst_state == true)
2858 sink = aconnector->dc_link->local_sink;
/* Hold a local reference across the update below. */
2860 dc_sink_retain(sink);
2863 * Edid mgmt connector gets first update only in mode_valid hook and then
2864 * the connector sink is set to either fake or physical sink depends on link status.
2865 * Skip if already done during boot.
2867 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2868 && aconnector->dc_em_sink) {
2871 * For S3 resume with headless use eml_sink to fake stream
2872 * because on resume connector->sink is set to NULL
2874 mutex_lock(&dev->mode_config.mutex);
2877 if (aconnector->dc_sink) {
2878 amdgpu_dm_update_freesync_caps(connector, NULL);
2880 * retain and release below are used to
2881 * bump up refcount for sink because the link doesn't point
2882 * to it anymore after disconnect, so on next crtc to connector
2883 * reshuffle by UMD we will get into unwanted dc_sink release
2885 dc_sink_release(aconnector->dc_sink);
2887 aconnector->dc_sink = sink;
2888 dc_sink_retain(aconnector->dc_sink);
2889 amdgpu_dm_update_freesync_caps(connector,
2892 amdgpu_dm_update_freesync_caps(connector, NULL);
/* No real sink: fall back to the emulated (EDID-managed) sink. */
2893 if (!aconnector->dc_sink) {
2894 aconnector->dc_sink = aconnector->dc_em_sink;
2895 dc_sink_retain(aconnector->dc_sink);
2899 mutex_unlock(&dev->mode_config.mutex);
2902 dc_sink_release(sink);
2907 * TODO: temporary guard to look for proper fix
2908 * if this sink is MST sink, we should not do anything
2910 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2911 dc_sink_release(sink);
/* Same sink as before (e.g. DP short pulse): nothing to update. */
2915 if (aconnector->dc_sink == sink) {
2917 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2920 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2921 aconnector->connector_id);
2923 dc_sink_release(sink);
2927 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2928 aconnector->connector_id, aconnector->dc_sink, sink);
2930 mutex_lock(&dev->mode_config.mutex);
2933 * 1. Update status of the drm connector
2934 * 2. Send an event and let userspace tell us what to do
2938 * TODO: check if we still need the S3 mode update workaround.
2939 * If yes, put it here.
2941 if (aconnector->dc_sink) {
2942 amdgpu_dm_update_freesync_caps(connector, NULL)
2943 dc_sink_release(aconnector->dc_sink);
2946 aconnector->dc_sink = sink;
2947 dc_sink_retain(aconnector->dc_sink);
2948 if (sink->dc_edid.length == 0) {
2949 aconnector->edid = NULL;
2950 if (aconnector->dc_link->aux_mode) {
2951 drm_dp_cec_unset_edid(
2952 &aconnector->dm_dp_aux.aux);
2956 (struct edid *)sink->dc_edid.raw_edid;
2958 drm_connector_update_edid_property(connector,
/* CEC needs the EDID to derive the physical address. */
2960 if (aconnector->dc_link->aux_mode)
2961 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2965 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2966 update_connector_ext_caps(aconnector);
/* Disconnect path: clear EDID, freesync, modes and the sink reference. */
2968 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2969 amdgpu_dm_update_freesync_caps(connector, NULL);
2970 drm_connector_update_edid_property(connector, NULL);
2971 aconnector->num_modes = 0;
2972 dc_sink_release(aconnector->dc_sink);
2973 aconnector->dc_sink = NULL;
2974 aconnector->edid = NULL;
2975 #ifdef CONFIG_DRM_AMD_DC_HDCP
2976 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2977 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2978 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2982 mutex_unlock(&dev->mode_config.mutex);
2984 update_subconnector_property(aconnector);
/* Drop the local reference taken at the top of the function. */
2987 dc_sink_release(sink);
/*
 * handle_hpd_irq_helper() - Common HPD (long pulse) handling.
 *
 * Resets HDCP for the link, re-runs link detection (emulated for forced
 * connectors), refreshes the connector state and sends a hotplug event to
 * userspace. Runs under the connector's hpd_lock.
 */
2990 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2992 struct drm_connector *connector = &aconnector->base;
2993 struct drm_device *dev = connector->dev;
2994 enum dc_connection_type new_connection_type = dc_connection_none;
2995 struct amdgpu_device *adev = drm_to_adev(dev);
2996 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2997 struct dm_crtc_state *dm_crtc_state = NULL;
2999 if (adev->dm.disable_hpd_irq)
3002 if (dm_con_state->base.state && dm_con_state->base.crtc)
3003 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3004 dm_con_state->base.state,
3005 dm_con_state->base.crtc));
3007 * In case of failure or MST no need to update connector status or notify the OS
3008 * since (for MST case) MST does this in its own context.
3010 mutex_lock(&aconnector->hpd_lock);
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* Any HPD transition invalidates the HDCP session on this link. */
3013 if (adev->dm.hdcp_workqueue) {
3014 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3015 dm_con_state->update_hdcp = true;
3018 if (aconnector->fake_enable)
3019 aconnector->fake_enable = false;
3021 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3022 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connector with nothing attached: emulate the detection. */
3024 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3025 emulated_link_detect(aconnector->dc_link);
3027 drm_modeset_lock_all(dev);
3028 dm_restore_drm_connector_state(dev, connector);
3029 drm_modeset_unlock_all(dev);
3031 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3032 drm_kms_helper_hotplug_event(dev);
3034 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
/* Unplug while a stream is active: force DPMS off first. */
3035 if (new_connection_type == dc_connection_none &&
3036 aconnector->dc_link->type == dc_connection_none &&
3038 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3040 amdgpu_dm_update_connector_after_detect(aconnector);
3042 drm_modeset_lock_all(dev);
3043 dm_restore_drm_connector_state(dev, connector);
3044 drm_modeset_unlock_all(dev);
3046 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3047 drm_kms_helper_hotplug_event(dev);
3049 mutex_unlock(&aconnector->hpd_lock);
/*
 * handle_hpd_irq() - HPD interrupt callback registered with the DM IRQ
 * layer; @param is the connector the interrupt fired for. Thin thunk that
 * forwards to handle_hpd_irq_helper().
 */
static void handle_hpd_irq(void *param)
{
	handle_hpd_irq_helper((struct amdgpu_dm_connector *)param);
}
/*
 * dm_handle_mst_sideband_msg() - Drain MST sideband messages after an HPD
 * short pulse.
 *
 * Repeatedly reads the DPCD IRQ/ESI bytes, hands them to the DRM MST
 * manager, and ACKs handled IRQs back to the sink (retrying the ACK write
 * up to 3 times), until no new IRQ is reported or max_process_count loops
 * have run. The DPCD address depends on the sink's DPCD revision: pre-1.2
 * sinks use 0x200 (SINK_COUNT), 1.2+ use the 0x2002 ESI range.
 */
3061 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3063 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3065 bool new_irq_handled = false;
3067 int dpcd_bytes_to_read;
3069 const int max_process_count = 30;
3070 int process_count = 0;
3072 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3074 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3075 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3076 /* DPCD 0x200 - 0x201 for downstream IRQ */
3077 dpcd_addr = DP_SINK_COUNT;
3079 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3080 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3081 dpcd_addr = DP_SINK_COUNT_ESI;
3084 dret = drm_dp_dpcd_read(
3085 &aconnector->dm_dp_aux.aux,
3088 dpcd_bytes_to_read);
/* Keep servicing as long as a full IRQ vector was read, bounded above. */
3090 while (dret == dpcd_bytes_to_read &&
3091 process_count < max_process_count) {
3097 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3098 /* handle HPD short pulse irq */
3099 if (aconnector->mst_mgr.mst_state)
3101 &aconnector->mst_mgr,
3105 if (new_irq_handled) {
3106 /* ACK at DPCD to notify down stream */
3107 const int ack_dpcd_bytes_to_write =
3108 dpcd_bytes_to_read - 1;
3110 for (retry = 0; retry < 3; retry++) {
3113 wret = drm_dp_dpcd_write(
3114 &aconnector->dm_dp_aux.aux,
3117 ack_dpcd_bytes_to_write);
3118 if (wret == ack_dpcd_bytes_to_write)
3122 /* check if there is new irq to be handled */
3123 dret = drm_dp_dpcd_read(
3124 &aconnector->dm_dp_aux.aux,
3127 dpcd_bytes_to_read);
3129 new_irq_handled = false;
3135 if (process_count == max_process_count)
3136 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3139 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3140 union hpd_irq_data hpd_irq_data)
3142 struct hpd_rx_irq_offload_work *offload_work =
3143 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3145 if (!offload_work) {
3146 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3150 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3151 offload_work->data = hpd_irq_data;
3152 offload_work->offload_wq = offload_wq;
3154 queue_work(offload_wq->wq, &offload_work->work);
3155 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
/*
 * handle_hpd_rx_irq() - HPD RX (short pulse) interrupt callback.
 *
 * Lets DC decode the DPCD IRQ data, then: offloads automated-test and
 * link-loss handling to the per-link workqueue, services MST sideband
 * messages, and for non-MST connectors re-runs detection and notifies
 * userspace when the downstream port status changed. CP_IRQ is forwarded
 * to the HDCP workqueue and CEC IRQs to the DP AUX CEC helper.
 */
3158 static void handle_hpd_rx_irq(void *param)
3160 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3161 struct drm_connector *connector = &aconnector->base;
3162 struct drm_device *dev = connector->dev;
3163 struct dc_link *dc_link = aconnector->dc_link;
3164 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3165 bool result = false;
3166 enum dc_connection_type new_connection_type = dc_connection_none;
3167 struct amdgpu_device *adev = drm_to_adev(dev);
3168 union hpd_irq_data hpd_irq_data;
3169 bool link_loss = false;
3170 bool has_left_work = false;
3171 int idx = aconnector->base.index;
3172 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3174 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3176 if (adev->dm.disable_hpd_irq)
3180 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3181 * conflict, after implement i2c helper, this mutex should be
3184 mutex_lock(&aconnector->hpd_lock);
3186 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3187 &link_loss, true, &has_left_work);
/* Automated test requests are handled asynchronously on the offload wq. */
3192 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3193 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3197 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3198 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3199 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3200 dm_handle_mst_sideband_msg(aconnector);
/* Only one link-loss recovery may be in flight per link at a time. */
3207 spin_lock(&offload_wq->offload_lock);
3208 skip = offload_wq->is_handling_link_loss;
3211 offload_wq->is_handling_link_loss = true;
3213 spin_unlock(&offload_wq->offload_lock);
3216 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3223 if (result && !is_mst_root_connector) {
3224 /* Downstream Port status changed. */
3225 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3226 DRM_ERROR("KMS: Failed to detect connector\n");
3228 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3229 emulated_link_detect(dc_link);
3231 if (aconnector->fake_enable)
3232 aconnector->fake_enable = false;
3234 amdgpu_dm_update_connector_after_detect(aconnector);
3237 drm_modeset_lock_all(dev);
3238 dm_restore_drm_connector_state(dev, connector);
3239 drm_modeset_unlock_all(dev);
3241 drm_kms_helper_hotplug_event(dev);
3242 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3244 if (aconnector->fake_enable)
3245 aconnector->fake_enable = false;
3247 amdgpu_dm_update_connector_after_detect(aconnector);
3250 drm_modeset_lock_all(dev);
3251 dm_restore_drm_connector_state(dev, connector);
3252 drm_modeset_unlock_all(dev);
3254 drm_kms_helper_hotplug_event(dev);
3257 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* Content-protection IRQ: let the HDCP workqueue re-authenticate. */
3258 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3259 if (adev->dm.hdcp_workqueue)
3260 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3264 if (dc_link->type != dc_connection_mst_branch)
3265 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3267 mutex_unlock(&aconnector->hpd_lock);
/*
 * register_hpd_handlers() - Register HPD and HPD RX interrupt callbacks for
 * every connector that has valid HPD IRQ sources, and bind each connector
 * to its HPD RX offload queue slot.
 */
3270 static void register_hpd_handlers(struct amdgpu_device *adev)
3272 struct drm_device *dev = adev_to_drm(adev);
3273 struct drm_connector *connector;
3274 struct amdgpu_dm_connector *aconnector;
3275 const struct dc_link *dc_link;
3276 struct dc_interrupt_params int_params = {0};
3278 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3279 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3281 list_for_each_entry(connector,
3282 &dev->mode_config.connector_list, head) {
3284 aconnector = to_amdgpu_dm_connector(connector);
3285 dc_link = aconnector->dc_link;
/* Long-pulse HPD (connect/disconnect) handler. */
3287 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3288 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3289 int_params.irq_source = dc_link->irq_source_hpd;
3291 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3293 (void *) aconnector);
3296 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3298 /* Also register for DP short pulse (hpd_rx). */
3299 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3300 int_params.irq_source = dc_link->irq_source_hpd_rx;
3302 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3304 (void *) aconnector);
3306 if (adev->dm.hpd_rx_offload_wq)
3307 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3313 #if defined(CONFIG_DRM_AMD_DC_SI)
3314 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * dce60_register_irq_handlers() - DCE6 (SI) variant: registers VBLANK,
 * pageflip and HPD interrupt sources with the base driver and hooks the DM
 * high-IRQ handlers to them. Unlike dce110, DCE6 has no VUPDATE source.
 */
3315 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3317 struct dc *dc = adev->dm.dc;
3318 struct common_irq_params *c_irq_params;
3319 struct dc_interrupt_params int_params = {0};
3322 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3324 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3325 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3328 * Actions of amdgpu_irq_add_id():
3329 * 1. Register a set() function with base driver.
3330 * Base driver will call set() function to enable/disable an
3331 * interrupt in DC hardware.
3332 * 2. Register amdgpu_dm_irq_handler().
3333 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3334 * coming from DC hardware.
3335 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3336 * for acknowledging and handling. */
3338 /* Use VBLANK interrupt */
3339 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3340 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq)
3342 DRM_ERROR("Failed to add crtc irq id!\n");
3346 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3347 int_params.irq_source =
3348 dc_interrupt_to_irq_source(dc, i+1 , 0);
3350 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3352 c_irq_params->adev = adev;
3353 c_irq_params->irq_src = int_params.irq_source;
3355 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3356 dm_crtc_high_irq, c_irq_params);
3359 /* Use GRPH_PFLIP interrupt */
3360 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3361 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3362 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3364 DRM_ERROR("Failed to add page flip irq id!\n");
3368 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3369 int_params.irq_source =
3370 dc_interrupt_to_irq_source(dc, i, 0);
3372 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3374 c_irq_params->adev = adev;
3375 c_irq_params->irq_src = int_params.irq_source;
3377 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3378 dm_pflip_high_irq, c_irq_params);
3383 r = amdgpu_irq_add_id(adev, client_id,
3384 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3386 DRM_ERROR("Failed to add hpd irq id!\n");
3390 register_hpd_handlers(adev);
3396 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * dce110_register_irq_handlers() - DCE8+ variant: registers VBLANK, VUPDATE,
 * pageflip and HPD interrupt sources and hooks the DM high-IRQ handlers.
 * AI-family (Vega and newer) parts use the SOC15 DCE IH client ID instead of
 * the legacy one.
 */
3397 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3399 struct dc *dc = adev->dm.dc;
3400 struct common_irq_params *c_irq_params;
3401 struct dc_interrupt_params int_params = {0};
3404 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3406 if (adev->family >= AMDGPU_FAMILY_AI)
3407 client_id = SOC15_IH_CLIENTID_DCE;
3409 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3410 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3413 * Actions of amdgpu_irq_add_id():
3414 * 1. Register a set() function with base driver.
3415 * Base driver will call set() function to enable/disable an
3416 * interrupt in DC hardware.
3417 * 2. Register amdgpu_dm_irq_handler().
3418 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3419 * coming from DC hardware.
3420 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3421 * for acknowledging and handling. */
3423 /* Use VBLANK interrupt */
3424 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3425 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3427 DRM_ERROR("Failed to add crtc irq id!\n");
3431 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3432 int_params.irq_source =
3433 dc_interrupt_to_irq_source(dc, i, 0);
3435 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3437 c_irq_params->adev = adev;
3438 c_irq_params->irq_src = int_params.irq_source;
3440 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3441 dm_crtc_high_irq, c_irq_params);
3444 /* Use VUPDATE interrupt */
3445 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3446 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3448 DRM_ERROR("Failed to add vupdate irq id!\n");
3452 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3453 int_params.irq_source =
3454 dc_interrupt_to_irq_source(dc, i, 0);
3456 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3458 c_irq_params->adev = adev;
3459 c_irq_params->irq_src = int_params.irq_source;
3461 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3462 dm_vupdate_high_irq, c_irq_params);
3465 /* Use GRPH_PFLIP interrupt */
3466 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3467 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3468 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3470 DRM_ERROR("Failed to add page flip irq id!\n");
3474 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3475 int_params.irq_source =
3476 dc_interrupt_to_irq_source(dc, i, 0);
3478 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3480 c_irq_params->adev = adev;
3481 c_irq_params->irq_src = int_params.irq_source;
3483 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3484 dm_pflip_high_irq, c_irq_params);
3489 r = amdgpu_irq_add_id(adev, client_id,
3490 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3492 DRM_ERROR("Failed to add hpd irq id!\n");
3496 register_hpd_handlers(adev);
3501 #if defined(CONFIG_DRM_AMD_DC_DCN)
3502 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * dcn10_register_irq_handlers() - wire up display interrupts for DCN ASICs.
 *
 * Same structure as the DCE variant: add each IRQ id with the base driver,
 * then register the DM handler with a common_irq_params slot from adev->dm.
 * DCN uses VSTARTUP in place of VBLANK and VUPDATE_NO_LOCK in place of the
 * DCE VUPDATE source (see the comment at the loop below).
 *
 * NOTE(review): extract is missing lines (braces, i/r declarations, error
 * checks, returns, #endif) — verify against the full file.
 */
3503 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3505 	struct dc *dc = adev->dm.dc;
3506 	struct common_irq_params *c_irq_params;
3507 	struct dc_interrupt_params int_params = {0};
3510 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* OTG vertical-line-0 sources, indexed per CRTC, used by secure display. */
3511 	static const unsigned int vrtl_int_srcid[] = {
3512 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3513 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3514 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3515 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3516 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3517 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3521 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3522 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3525 	 * Actions of amdgpu_irq_add_id():
3526 	 * 1. Register a set() function with base driver.
3527 	 *    Base driver will call set() function to enable/disable an
3528 	 *    interrupt in DC hardware.
3529 	 * 2. Register amdgpu_dm_irq_handler().
3530 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3531 	 *    coming from DC hardware.
3532 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3533 	 *    for acknowledging and handling.
3536 	/* Use VSTARTUP interrupt */
3537 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3538 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3540 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3543 			DRM_ERROR("Failed to add crtc irq id!\n");
3547 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3548 		int_params.irq_source =
3549 			dc_interrupt_to_irq_source(dc, i, 0);
3551 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3553 		c_irq_params->adev = adev;
3554 		c_irq_params->irq_src = int_params.irq_source;
3556 		amdgpu_dm_irq_register_interrupt(
3557 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3560 	/* Use otg vertical line interrupt */
3561 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3562 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3563 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3564 				vrtl_int_srcid[i], &adev->vline0_irq);
3567 			DRM_ERROR("Failed to add vline0 irq id!\n");
3571 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3572 		int_params.irq_source =
3573 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
/* Unlike the other loops, an unmapped source is reported explicitly here. */
3575 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3576 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3580 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3581 					- DC_IRQ_SOURCE_DC1_VLINE0];
3583 		c_irq_params->adev = adev;
3584 		c_irq_params->irq_src = int_params.irq_source;
3586 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3587 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3591 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3592 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3593 	 * to trigger at end of each vblank, regardless of state of the lock,
3594 	 * matching DCE behaviour.
3596 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3597 			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3599 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3602 			DRM_ERROR("Failed to add vupdate irq id!\n");
3606 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3607 		int_params.irq_source =
3608 			dc_interrupt_to_irq_source(dc, i, 0);
3610 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3612 		c_irq_params->adev = adev;
3613 		c_irq_params->irq_src = int_params.irq_source;
3615 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3616 				dm_vupdate_high_irq, c_irq_params);
3619 	/* Use GRPH_PFLIP interrupt */
3620 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3621 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3623 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3625 			DRM_ERROR("Failed to add page flip irq id!\n");
3629 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3630 		int_params.irq_source =
3631 			dc_interrupt_to_irq_source(dc, i, 0);
3633 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3635 		c_irq_params->adev = adev;
3636 		c_irq_params->irq_src = int_params.irq_source;
3638 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3639 				dm_pflip_high_irq, c_irq_params);
/* HPD: single id for all connectors; handlers registered below. */
3644 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3647 		DRM_ERROR("Failed to add hpd irq id!\n");
3651 	register_hpd_handlers(adev);
3655 /* Register Outbox IRQ sources and initialize IRQ callbacks */
/*
 * register_outbox_irq_handlers() - hook up the DMCUB outbox interrupt.
 *
 * The outbox carries messages from DMUB firmware back to the driver; note
 * it is registered with INTERRUPT_LOW_IRQ_CONTEXT (deferred handling),
 * unlike the display IRQs above.  Only registered when a DMUB service
 * exists (dc->ctx->dmub_srv).
 * NOTE(review): extract is missing the error-check/return lines.
 */
3656 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3658 	struct dc *dc = adev->dm.dc;
3659 	struct common_irq_params *c_irq_params;
3660 	struct dc_interrupt_params int_params = {0};
3663 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3664 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3666 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3667 			&adev->dmub_outbox_irq);
3669 		DRM_ERROR("Failed to add outbox irq id!\n");
3673 	if (dc->ctx->dmub_srv) {
3674 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3675 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3676 		int_params.irq_source =
3677 		dc_interrupt_to_irq_source(dc, i, 0);
/* Only one outbox, so slot 0 is used unconditionally. */
3679 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3681 		c_irq_params->adev = adev;
3682 		c_irq_params->irq_src = int_params.irq_source;
3684 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3685 				dm_dmub_outbox1_low_irq, c_irq_params);
3693 * Acquires the lock for the atomic state object and returns
3694 * the new atomic state.
3696 * This should only be called during atomic check.
/*
 * Fetches (and locks) the DM private object state from the in-progress
 * atomic transaction and stores it in *dm_state.  Propagates the error
 * from drm_atomic_get_private_obj_state() on failure.
 * NOTE(review): the "return 0;" / closing brace are missing from this
 * extract.
 */
3698 static int dm_atomic_get_state(struct drm_atomic_state *state,
3699 			       struct dm_atomic_state **dm_state)
3701 	struct drm_device *dev = state->dev;
3702 	struct amdgpu_device *adev = drm_to_adev(dev);
3703 	struct amdgpu_display_manager *dm = &adev->dm;
3704 	struct drm_private_state *priv_state;
3709 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3710 	if (IS_ERR(priv_state))
3711 		return PTR_ERR(priv_state);
3713 	*dm_state = to_dm_atomic_state(priv_state);
/*
 * Returns the new DM private state already present in @state, or NULL if
 * the DM private object is not part of this transaction.  Unlike
 * dm_atomic_get_state(), this does not acquire the object lock.
 */
3718 static struct dm_atomic_state *
3719 dm_atomic_get_new_state(struct drm_atomic_state *state)
3721 	struct drm_device *dev = state->dev;
3722 	struct amdgpu_device *adev = drm_to_adev(dev);
3723 	struct amdgpu_display_manager *dm = &adev->dm;
3724 	struct drm_private_obj *obj;
3725 	struct drm_private_state *new_obj_state;
/* Match our private object by its funcs pointer. */
3728 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3729 		if (obj->funcs == dm->atomic_obj.funcs)
3730 			return to_dm_atomic_state(new_obj_state);
/*
 * drm_private_state_funcs.atomic_duplicate_state hook: clones the DM
 * private state, including a deep copy of the DC state context via
 * dc_copy_state().
 * NOTE(review): allocation-failure returns and the error path for a NULL
 * copied context are missing from this extract.
 */
3736 static struct drm_private_state *
3737 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3739 	struct dm_atomic_state *old_state, *new_state;
3741 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3745 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3747 	old_state = to_dm_atomic_state(obj->state);
3749 	if (old_state && old_state->context)
3750 		new_state->context = dc_copy_state(old_state->context);
3752 	if (!new_state->context) {
3757 	return &new_state->base;
/*
 * drm_private_state_funcs.atomic_destroy_state hook: drops the DC state
 * reference held by the DM private state before the state itself is freed.
 * NOTE(review): the kfree of dm_state is missing from this extract.
 */
3760 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3761 				    struct drm_private_state *state)
3763 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3765 	if (dm_state && dm_state->context)
3766 		dc_release_state(dm_state->context);
/* Vtable binding the DM private object to the duplicate/destroy hooks above. */
3771 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3772 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3773 	.atomic_destroy_state = dm_atomic_destroy_state,
/*
 * amdgpu_dm_mode_config_init() - set up the DRM mode_config for this device.
 *
 * Configures mode limits/behaviour flags, creates the initial DC state
 * (copied from current hardware state), registers the DM atomic private
 * object, then creates display properties and audio support.
 * NOTE(review): allocation checks, error returns and the final return are
 * missing from this extract.
 */
3776 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3778 	struct dm_atomic_state *state;
3781 	adev->mode_info.mode_config_initialized = true;
3783 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3784 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3786 	adev_to_drm(adev)->mode_config.max_width = 16384;
3787 	adev_to_drm(adev)->mode_config.max_height = 16384;
3789 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3790 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3791 	/* indicates support for immediate flip */
3792 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3794 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3796 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3800 	state->context = dc_create_state(adev->dm.dc);
3801 	if (!state->context) {
/* Seed the private state with what the hardware is currently scanning out. */
3806 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3808 	drm_atomic_private_obj_init(adev_to_drm(adev),
3809 				    &adev->dm.atomic_obj,
3811 				    &dm_atomic_state_funcs);
3813 	r = amdgpu_display_modeset_create_props(adev);
3815 		dc_release_state(state->context);
3820 	r = amdgpu_dm_audio_init(adev);
3822 		dc_release_state(state->context);
3830 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3831 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3832 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3834 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3835 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/*
 * amdgpu_dm_update_backlight_caps() - populate backlight caps for one eDP.
 *
 * With ACPI: queries firmware caps once (skips work if caps_valid already
 * set); uses the firmware min/max input signal, falling back to the
 * AMDGPU_DM_DEFAULT_* range when firmware reports nothing.  Without ACPI:
 * unconditionally applies the default range.
 * NOTE(review): the aux_support branches and closing braces are partly
 * missing from this extract — the #else structure below is inferred.
 */
3837 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3840 #if defined(CONFIG_ACPI)
3841 	struct amdgpu_dm_backlight_caps caps;
3843 	memset(&caps, 0, sizeof(caps));
/* Caps are cached per backlight index; only query ACPI the first time. */
3845 	if (dm->backlight_caps[bl_idx].caps_valid)
3848 	amdgpu_acpi_get_backlight_caps(&caps);
3849 	if (caps.caps_valid) {
3850 		dm->backlight_caps[bl_idx].caps_valid = true;
3851 		if (caps.aux_support)
3853 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3854 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3856 		dm->backlight_caps[bl_idx].min_input_signal =
3857 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3858 		dm->backlight_caps[bl_idx].max_input_signal =
3859 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3862 	if (dm->backlight_caps[bl_idx].aux_support)
3865 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3866 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
/*
 * get_brightness_range() - derive the backlight control range from caps.
 *
 * AUX-controlled panels: caps are in nits; scaled x1000 to millinits for
 * the DC API.  PWM-controlled panels: caps are 8-bit; scaled x0x101 to
 * span the 16-bit PWM range (0xFF * 0x101 == 0xFFFF).
 * NOTE(review): the caps_valid guard and the return statements are missing
 * from this extract.
 */
3870 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3871 				unsigned *min, unsigned *max)
3876 	if (caps->aux_support) {
3877 		// Firmware limits are in nits, DC API wants millinits.
3878 		*max = 1000 * caps->aux_max_input_signal;
3879 		*min = 1000 * caps->aux_min_input_signal;
3881 		// Firmware limits are 8-bit, PWM control is 16-bit.
3882 		*max = 0x101 * caps->max_input_signal;
3883 		*min = 0x101 * caps->min_input_signal;
/*
 * Linearly rescales a user brightness value (0..AMDGPU_MAX_BL_LEVEL) into
 * the panel's hardware range obtained from get_brightness_range().
 * NOTE(review): the fallback return when the range is unavailable is
 * missing from this extract.
 */
3888 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3889 					uint32_t brightness)
3893 	if (!get_brightness_range(caps, &min, &max))
3896 	// Rescale 0..255 to min..max
3897 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3898 				       AMDGPU_MAX_BL_LEVEL);
/*
 * Inverse of convert_brightness_from_user(): maps a hardware brightness
 * back into the user-visible 0..AMDGPU_MAX_BL_LEVEL scale, clamping values
 * below the hardware minimum.
 * NOTE(review): fallback return and the divisor line of the final
 * expression are missing from this extract.
 */
3901 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3902 				      uint32_t brightness)
3906 	if (!get_brightness_range(caps, &min, &max))
3909 	if (brightness < min)
3911 	// Rescale min..max to 0..255
3912 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
/*
 * amdgpu_dm_backlight_set_level() - program backlight for one eDP link.
 *
 * Refreshes caps, records the requested user brightness, mirrors it into
 * the BIOS scratch register, converts it to the hardware range, then
 * programs the panel either via DPCD AUX (in nits, with a fixed transition
 * time) or via the conventional dc_link PWM path.  Failures are logged at
 * debug level only — the cached brightness is kept either way.
 */
3916 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3918 					 u32 user_brightness)
3920 	struct amdgpu_dm_backlight_caps caps;
3921 	struct dc_link *link;
3925 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3926 	caps = dm->backlight_caps[bl_idx];
3928 	dm->brightness[bl_idx] = user_brightness;
3929 	/* update scratch register */
3931 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3932 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3933 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3935 	/* Change brightness based on AUX property */
3936 	if (caps.aux_support) {
3937 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3938 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3940 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3942 		rc = dc_link_set_backlight_level(link, brightness, 0);
3944 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
/*
 * backlight_ops.update_status hook: locate which eDP index this backlight
 * device corresponds to, then forward props.brightness to
 * amdgpu_dm_backlight_set_level().  Bails out if the device is not one of
 * ours (index out of range).
 */
3950 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3952 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3955 	for (i = 0; i < dm->num_of_edps; i++) {
3956 		if (bd == dm->backlight_dev[i])
3959 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3961 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
/*
 * amdgpu_dm_backlight_get_level() - read back the current brightness.
 *
 * AUX panels: query average/peak nits over DPCD and convert the average to
 * the user scale.  PWM panels: read the dc_link level.  On any read error
 * (or DC_ERROR_UNEXPECTED) the last cached user brightness is returned
 * instead.
 */
3966 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3969 	struct amdgpu_dm_backlight_caps caps;
3970 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3972 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3973 	caps = dm->backlight_caps[bl_idx];
3975 	if (caps.aux_support) {
3979 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3981 			return dm->brightness[bl_idx];
3982 		return convert_brightness_to_user(&caps, avg);
3984 	int ret = dc_link_get_backlight_level(link);
3986 	if (ret == DC_ERROR_UNEXPECTED)
3987 		return dm->brightness[bl_idx];
3988 	return convert_brightness_to_user(&caps, ret);
/*
 * backlight_ops.get_brightness hook: resolve the eDP index for this
 * backlight device and delegate to amdgpu_dm_backlight_get_level().
 */
3992 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3994 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3997 	for (i = 0; i < dm->num_of_edps; i++) {
3998 		if (bd == dm->backlight_dev[i])
4001 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4003 	return amdgpu_dm_backlight_get_level(dm, i);
/* Backlight class operations; BL_CORE_SUSPENDRESUME lets the core handle
 * suspend/resume brightness save/restore. */
4006 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4007 	.options = BL_CORE_SUSPENDRESUME,
4008 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4009 	.update_status	= amdgpu_dm_backlight_update_status,
/*
 * amdgpu_dm_register_backlight_device() - create the sysfs backlight
 * device for the next eDP slot (dm->num_of_edps).
 *
 * Seeds caps and brightness to maximum, names the device
 * "amdgpu_bl<minor+edp_idx>", and registers it with the backlight class.
 * Registration failure is logged but deliberately non-fatal.
 */
4013 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4016 	struct backlight_properties props = { 0 };
4018 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4019 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4021 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4022 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4023 	props.type = BACKLIGHT_RAW;
4025 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4026 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4028 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4029 								       adev_to_drm(dm->adev)->dev,
4031 								       &amdgpu_dm_backlight_ops,
4034 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4035 		DRM_ERROR("DM: Backlight registration failed!\n");
4037 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
/*
 * initialize_plane() - allocate and initialize one DRM plane.
 *
 * @mode_info may be NULL (overlay planes are not tracked there).  The
 * possible_crtcs mask restricts primary planes to their own CRTC to keep
 * IGT happy (see HACK comment); planes beyond max_streams get 0xff.
 * On amdgpu_dm_plane_init() failure the plane is expected to be freed —
 * NOTE(review): that kfree/error-return path is missing from this extract.
 */
4041 static int initialize_plane(struct amdgpu_display_manager *dm,
4042 			    struct amdgpu_mode_info *mode_info, int plane_id,
4043 			    enum drm_plane_type plane_type,
4044 			    const struct dc_plane_cap *plane_cap)
4046 	struct drm_plane *plane;
4047 	unsigned long possible_crtcs;
4050 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4052 		DRM_ERROR("KMS: Failed to allocate plane\n");
4055 	plane->type = plane_type;
4058 	 * HACK: IGT tests expect that the primary plane for a CRTC
4059 	 * can only have one possible CRTC. Only expose support for
4060 	 * any CRTC if they're not going to be used as a primary plane
4061 	 * for a CRTC - like overlay or underlay planes.
4063 	possible_crtcs = 1 << plane_id;
4064 	if (plane_id >= dm->dc->caps.max_streams)
4065 		possible_crtcs = 0xff;
4067 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4070 		DRM_ERROR("KMS: Failed to initialize plane\n");
4076 		mode_info->planes[plane_id] = plane;
/*
 * register_backlight_device() - attach a backlight device to an eDP/LVDS
 * link, if one is connected.  Compiled out entirely when the backlight
 * class is not configured.  On success the link is recorded in
 * dm->backlight_link[] — NOTE(review): the num_of_edps increment is
 * missing from this extract.
 */
4082 static void register_backlight_device(struct amdgpu_display_manager *dm,
4083 				      struct dc_link *link)
4085 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4086 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4088 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4089 	    link->type != dc_connection_none) {
4091 		 * Event if registration failed, we should continue with
4092 		 * DM initialization because not having a backlight control
4093 		 * is better then a black screen.
4095 		if (!dm->backlight_dev[dm->num_of_edps])
4096 			amdgpu_dm_register_backlight_device(dm);
4098 		if (dm->backlight_dev[dm->num_of_edps]) {
4099 			dm->backlight_link[dm->num_of_edps] = link;
4108 * In this architecture, the association
4109 * connector -> encoder -> crtc
4110 * id not really requried. The crtc and connector will hold the
4111 * display_index as an abstraction to use with DAL component
4113 * Returns 0 on success
/*
 * amdgpu_dm_initialize_drm_device() - build all KMS objects for the device.
 *
 * Sequence: mode config init; primary planes (one per stream, reverse
 * order); at most one DCN overlay plane; CRTCs; DMUB outbox handlers and
 * PSR default for supported DCN IP versions; one connector+encoder per DC
 * link with initial sink detection; finally the per-ASIC IRQ handler
 * registration.  NOTE(review): many error paths, braces and case labels
 * are missing from this extract — verify against the full file.
 */
4115 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4117 	struct amdgpu_display_manager *dm = &adev->dm;
4119 	struct amdgpu_dm_connector *aconnector = NULL;
4120 	struct amdgpu_encoder *aencoder = NULL;
4121 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4123 	int32_t primary_planes;
4124 	enum dc_connection_type new_connection_type = dc_connection_none;
4125 	const struct dc_plane_cap *plane;
4126 	bool psr_feature_enabled = false;
4128 	dm->display_indexes_num = dm->dc->caps.max_streams;
4129 	/* Update the actual used number of crtc */
4130 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4132 	link_cnt = dm->dc->caps.max_links;
4133 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4134 		DRM_ERROR("DM: Failed to initialize mode config\n");
4138 	/* There is one primary plane per CRTC */
4139 	primary_planes = dm->dc->caps.max_streams;
4140 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4143 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4144 	 * Order is reversed to match iteration order in atomic check.
4146 	for (i = (primary_planes - 1); i >= 0; i--) {
4147 		plane = &dm->dc->caps.planes[i];
4149 		if (initialize_plane(dm, mode_info, i,
4150 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4151 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4157 	 * Initialize overlay planes, index starting after primary planes.
4158 	 * These planes have a higher DRM index than the primary planes since
4159 	 * they should be considered as having a higher z-order.
4160 	 * Order is reversed to match iteration order in atomic check.
4162 	 * Only support DCN for now, and only expose one so we don't encourage
4163 	 * userspace to use up all the pipes.
4165 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4166 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
/* Overlay candidates must be DCN universal planes that can blend both
 * ways and support ARGB8888. */
4168 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4171 		if (!plane->blends_with_above || !plane->blends_with_below)
4174 		if (!plane->pixel_format_support.argb8888)
4177 		if (initialize_plane(dm, NULL, primary_planes + i,
4178 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4179 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4183 		/* Only create one overlay plane. */
4187 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4188 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4189 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4193 #if defined(CONFIG_DRM_AMD_DC_DCN)
4194 	/* Use Outbox interrupt */
4195 	switch (adev->ip_versions[DCE_HWIP][0]) {
4196 	case IP_VERSION(3, 0, 0):
4197 	case IP_VERSION(3, 1, 2):
4198 	case IP_VERSION(3, 1, 3):
4199 	case IP_VERSION(2, 1, 0):
4200 		if (register_outbox_irq_handlers(dm->adev)) {
4201 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4206 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4207 			      adev->ip_versions[DCE_HWIP][0]);
4210 	/* Determine whether to enable PSR support by default. */
4211 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4212 		switch (adev->ip_versions[DCE_HWIP][0]) {
4213 		case IP_VERSION(3, 1, 2):
4214 		case IP_VERSION(3, 1, 3):
/* DCN 3.1.x: PSR on by default; older IPs honor the feature-mask bit. */
4215 			psr_feature_enabled = true;
4218 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4224 	/* loops over all connectors on the board */
4225 	for (i = 0; i < link_cnt; i++) {
4226 		struct dc_link *link = NULL;
4228 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4230 				"KMS: Cannot support more than %d display indexes\n",
4231 				      AMDGPU_DM_MAX_DISPLAY_INDEX);
4235 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4239 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4243 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4244 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4248 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4249 			DRM_ERROR("KMS: Failed to initialize connector\n");
4253 		link = dc_get_link_at_index(dm->dc, i);
4255 		if (!dc_link_detect_sink(link, &new_connection_type))
4256 			DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connectors with no physical sink get an emulated link so the
 * rest of init can proceed. */
4258 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4259 			emulated_link_detect(link);
4260 			amdgpu_dm_update_connector_after_detect(aconnector);
4262 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4263 			amdgpu_dm_update_connector_after_detect(aconnector);
4264 			register_backlight_device(dm, link);
4265 			if (dm->num_of_edps)
4266 				update_connector_ext_caps(aconnector);
4267 			if (psr_feature_enabled)
4268 				amdgpu_dm_set_psr_caps(link);
4274 	/* Software is initialized. Now we can register interrupt handlers. */
4275 	switch (adev->asic_type) {
4276 #if defined(CONFIG_DRM_AMD_DC_SI)
4281 		if (dce60_register_irq_handlers(dm->adev)) {
4282 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4296 	case CHIP_POLARIS11:
4297 	case CHIP_POLARIS10:
4298 	case CHIP_POLARIS12:
4303 		if (dce110_register_irq_handlers(dm->adev)) {
4304 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4309 #if defined(CONFIG_DRM_AMD_DC_DCN)
4310 		switch (adev->ip_versions[DCE_HWIP][0]) {
4311 		case IP_VERSION(1, 0, 0):
4312 		case IP_VERSION(1, 0, 1):
4313 		case IP_VERSION(2, 0, 2):
4314 		case IP_VERSION(2, 0, 3):
4315 		case IP_VERSION(2, 0, 0):
4316 		case IP_VERSION(2, 1, 0):
4317 		case IP_VERSION(3, 0, 0):
4318 		case IP_VERSION(3, 0, 2):
4319 		case IP_VERSION(3, 0, 3):
4320 		case IP_VERSION(3, 0, 1):
4321 		case IP_VERSION(3, 1, 2):
4322 		case IP_VERSION(3, 1, 3):
4323 			if (dcn10_register_irq_handlers(dm->adev)) {
4324 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4329 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4330 					adev->ip_versions[DCE_HWIP][0]);
/*
 * Tear-down counterpart to amdgpu_dm_initialize_drm_device(); releases
 * the DM atomic private object.  NOTE(review): further cleanup lines may
 * be missing from this extract.
 */
4345 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4347 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4351 /******************************************************************************
4352  * amdgpu_display_funcs functions
4353  *****************************************************************************/
4356  * dm_bandwidth_update - program display watermarks
4358  * @adev: amdgpu_device pointer
4360  * Calculate and program the display watermarks and line buffer allocation.
/* Intentionally a stub: with DC, bandwidth/watermark programming is done
 * inside DC itself, so this base-driver hook has nothing to do. */
4362 static void dm_bandwidth_update(struct amdgpu_device *adev)
4364 	/* TODO: implement later */
/* Base-driver display hooks.  Entries marked NULL are either handled by
 * DC/DAL internally (VBIOS parsing, backlight) or unused with DC. */
4367 static const struct amdgpu_display_funcs dm_display_funcs = {
4368 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4369 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4370 	.backlight_set_level = NULL, /* never called for DC */
4371 	.backlight_get_level = NULL, /* never called for DC */
4372 	.hpd_sense = NULL,/* called unconditionally */
4373 	.hpd_set_polarity = NULL, /* called unconditionally */
4374 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4375 	.page_flip_get_scanoutpos =
4376 		dm_crtc_get_scanoutpos,/* called unconditionally */
4377 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4378 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4381 #if defined(CONFIG_DEBUG_KERNEL_DC)
/*
 * s3_debug sysfs store: debug-only hook that parses an integer and, based
 * on it, exercises the suspend/resume path and fires a hotplug event.
 * Returns the byte count consumed on successful parse, 0 otherwise.
 * NOTE(review): the buf/count parameters and the suspend/resume calls are
 * missing from this extract.
 */
4383 static ssize_t s3_debug_store(struct device *device,
4384 			      struct device_attribute *attr,
4390 	struct drm_device *drm_dev = dev_get_drvdata(device);
4391 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4393 	ret = kstrtoint(buf, 0, &s3_state);
4398 		drm_kms_helper_hotplug_event(adev_to_drm(adev));
4403 	return ret == 0 ? count : 0;
/* Write-only device attribute: creates dev_attr_s3_debug. */
4406 DEVICE_ATTR_WO(s3_debug);
/*
 * dm_early_init() - amd_ip_funcs.early_init hook for the DM IP block.
 *
 * Sets per-ASIC counts of CRTCs, HPD pins and digital encoders (by
 * asic_type for pre-DCN parts, by DCE_HWIP IP version for DCN), installs
 * the DM IRQ funcs and display funcs, and (debug builds) creates the
 * s3_debug sysfs attribute.
 * NOTE(review): the case labels naming each ASIC, break statements and
 * returns are missing from this extract — the bare assignment triples
 * below each belong to a chip case in the full file.
 */
4410 static int dm_early_init(void *handle)
4412 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4414 	switch (adev->asic_type) {
4415 #if defined(CONFIG_DRM_AMD_DC_SI)
4419 		adev->mode_info.num_crtc = 6;
4420 		adev->mode_info.num_hpd = 6;
4421 		adev->mode_info.num_dig = 6;
4424 		adev->mode_info.num_crtc = 2;
4425 		adev->mode_info.num_hpd = 2;
4426 		adev->mode_info.num_dig = 2;
4431 		adev->mode_info.num_crtc = 6;
4432 		adev->mode_info.num_hpd = 6;
4433 		adev->mode_info.num_dig = 6;
4436 		adev->mode_info.num_crtc = 4;
4437 		adev->mode_info.num_hpd = 6;
4438 		adev->mode_info.num_dig = 7;
4442 		adev->mode_info.num_crtc = 2;
4443 		adev->mode_info.num_hpd = 6;
4444 		adev->mode_info.num_dig = 6;
4448 		adev->mode_info.num_crtc = 6;
4449 		adev->mode_info.num_hpd = 6;
4450 		adev->mode_info.num_dig = 7;
4453 		adev->mode_info.num_crtc = 3;
4454 		adev->mode_info.num_hpd = 6;
4455 		adev->mode_info.num_dig = 9;
4458 		adev->mode_info.num_crtc = 2;
4459 		adev->mode_info.num_hpd = 6;
4460 		adev->mode_info.num_dig = 9;
4462 	case CHIP_POLARIS11:
4463 	case CHIP_POLARIS12:
4464 		adev->mode_info.num_crtc = 5;
4465 		adev->mode_info.num_hpd = 5;
4466 		adev->mode_info.num_dig = 5;
4468 	case CHIP_POLARIS10:
4470 		adev->mode_info.num_crtc = 6;
4471 		adev->mode_info.num_hpd = 6;
4472 		adev->mode_info.num_dig = 6;
4477 		adev->mode_info.num_crtc = 6;
4478 		adev->mode_info.num_hpd = 6;
4479 		adev->mode_info.num_dig = 6;
4482 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* DCN parts are keyed by display IP version rather than asic_type. */
4483 		switch (adev->ip_versions[DCE_HWIP][0]) {
4484 		case IP_VERSION(2, 0, 2):
4485 		case IP_VERSION(3, 0, 0):
4486 			adev->mode_info.num_crtc = 6;
4487 			adev->mode_info.num_hpd = 6;
4488 			adev->mode_info.num_dig = 6;
4490 		case IP_VERSION(2, 0, 0):
4491 		case IP_VERSION(3, 0, 2):
4492 			adev->mode_info.num_crtc = 5;
4493 			adev->mode_info.num_hpd = 5;
4494 			adev->mode_info.num_dig = 5;
4496 		case IP_VERSION(2, 0, 3):
4497 		case IP_VERSION(3, 0, 3):
4498 			adev->mode_info.num_crtc = 2;
4499 			adev->mode_info.num_hpd = 2;
4500 			adev->mode_info.num_dig = 2;
4502 		case IP_VERSION(1, 0, 0):
4503 		case IP_VERSION(1, 0, 1):
4504 		case IP_VERSION(3, 0, 1):
4505 		case IP_VERSION(2, 1, 0):
4506 		case IP_VERSION(3, 1, 2):
4507 		case IP_VERSION(3, 1, 3):
4508 			adev->mode_info.num_crtc = 4;
4509 			adev->mode_info.num_hpd = 4;
4510 			adev->mode_info.num_dig = 4;
4513 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4514 					adev->ip_versions[DCE_HWIP][0]);
4521 	amdgpu_dm_set_irq_funcs(adev);
4523 	if (adev->mode_info.funcs == NULL)
4524 		adev->mode_info.funcs = &dm_display_funcs;
4527 	 * Note: Do NOT change adev->audio_endpt_rreg and
4528 	 * adev->audio_endpt_wreg because they are initialised in
4529 	 * amdgpu_device_init()
4531 #if defined(CONFIG_DEBUG_KERNEL_DC)
4533 				  adev_to_drm(adev)->dev,
4534 				  &dev_attr_s3_debug);
/*
 * True when the CRTC is active and DRM flagged it as needing a full
 * modeset.  The stream parameters are unused here; presumably kept for
 * call-site symmetry with other helpers — TODO confirm.
 */
4540 static bool modeset_required(struct drm_crtc_state *crtc_state,
4541 			     struct dc_stream_state *new_stream,
4542 			     struct dc_stream_state *old_stream)
4544 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
/* True when the CRTC is being disabled via a modeset (inactive + needs
 * modeset) — the inverse activity condition of modeset_required(). */
4547 static bool modereset_required(struct drm_crtc_state *crtc_state)
4549 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
/*
 * drm_encoder_funcs.destroy hook.  NOTE(review): the kfree of the
 * encoder appears to be missing from this extract — verify against the
 * full file.
 */
4552 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4554 	drm_encoder_cleanup(encoder);
/* Encoder vtable: only destruction is driver-specific. */
4558 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4559 	.destroy = amdgpu_dm_encoder_destroy,
/*
 * get_min_max_dc_plane_scaling() - look up DC scaling limits for a
 * framebuffer format.
 *
 * Picks the NV12, FP16 or ARGB8888 caps bucket based on fb->format and
 * writes the factors (units of 1/1000) to *min_downscale / *max_upscale.
 * A cap of exactly 1 is DC's "no scaling" marker and is normalized to
 * 1000 (scale factor 1.0).
 * NOTE(review): break statements and the default label are missing from
 * this extract — the trailing pair of assignments is the default bucket.
 */
4563 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4564 					 struct drm_framebuffer *fb,
4565 					 int *min_downscale, int *max_upscale)
4567 	struct amdgpu_device *adev = drm_to_adev(dev);
4568 	struct dc *dc = adev->dm.dc;
4569 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4570 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4572 	switch (fb->format->format) {
4573 	case DRM_FORMAT_P010:
4574 	case DRM_FORMAT_NV12:
4575 	case DRM_FORMAT_NV21:
4576 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4577 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4580 	case DRM_FORMAT_XRGB16161616F:
4581 	case DRM_FORMAT_ARGB16161616F:
4582 	case DRM_FORMAT_XBGR16161616F:
4583 	case DRM_FORMAT_ABGR16161616F:
4584 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4585 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4589 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4590 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4595 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4596 	 * scaling factor of 1.0 == 1000 units.
4598 	if (*max_upscale == 1)
4599 		*max_upscale = 1000;
4601 	if (*min_downscale == 1)
4602 		*min_downscale = 1000;
/*
 * fill_dc_scaling_info() - translate a DRM plane state into DC src/dst/clip
 * rects and validate the implied scaling against per-format plane caps.
 *
 * DRM src coordinates are 16.16 fixed point; the fractional part is
 * dropped.  Rejects zero-sized rects, the DCN1x NV12 nonzero-offset hang
 * workaround case, and scale factors outside [min_downscale, max_upscale]
 * (units of 1/1000).
 * NOTE(review): the -EINVAL returns, else-branch braces and "return 0;"
 * are missing from this extract.
 */
4606 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4607 				const struct drm_plane_state *state,
4608 				struct dc_scaling_info *scaling_info)
4610 	int scale_w, scale_h, min_downscale, max_upscale;
4612 	memset(scaling_info, 0, sizeof(*scaling_info));
4614 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4615 	scaling_info->src_rect.x = state->src_x >> 16;
4616 	scaling_info->src_rect.y = state->src_y >> 16;
4619 	 * For reasons we don't (yet) fully understand a non-zero
4620 	 * src_y coordinate into an NV12 buffer can cause a
4621 	 * system hang on DCN1x.
4622 	 * To avoid hangs (and maybe be overly cautious)
4623 	 * let's reject both non-zero src_x and src_y.
4625 	 * We currently know of only one use-case to reproduce a
4626 	 * scenario with non-zero src_x and src_y for NV12, which
4627 	 * is to gesture the YouTube Android app into full screen
4630 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4631 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4632 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4633 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4636 	scaling_info->src_rect.width = state->src_w >> 16;
4637 	if (scaling_info->src_rect.width == 0)
4640 	scaling_info->src_rect.height = state->src_h >> 16;
4641 	if (scaling_info->src_rect.height == 0)
4644 	scaling_info->dst_rect.x = state->crtc_x;
4645 	scaling_info->dst_rect.y = state->crtc_y;
4647 	if (state->crtc_w == 0)
4650 	scaling_info->dst_rect.width = state->crtc_w;
4652 	if (state->crtc_h == 0)
4655 	scaling_info->dst_rect.height = state->crtc_h;
4657 	/* DRM doesn't specify clipping on destination output. */
4658 	scaling_info->clip_rect = scaling_info->dst_rect;
4660 	/* Validate scaling per-format with DC plane caps */
4661 	if (state->plane && state->plane->dev && state->fb) {
4662 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4663 					     &min_downscale, &max_upscale);
/* No fb/plane to consult: fall back to generous generic limits. */
4665 		min_downscale = 250;
4666 		max_upscale = 16000;
4669 	scale_w = scaling_info->dst_rect.width * 1000 /
4670 		  scaling_info->src_rect.width;
4672 	if (scale_w < min_downscale || scale_w > max_upscale)
4675 	scale_h = scaling_info->dst_rect.height * 1000 /
4676 		  scaling_info->src_rect.height;
4678 	if (scale_h < min_downscale || scale_h > max_upscale)
4682 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4683 	 * assume reasonable defaults based on the format.
/*
 * Translate pre-GFX9 (GFX8/VI-era) AMDGPU_TILING_* flag bits into DC's
 * union dc_tiling_info gfx8 layout.  Only the 2D- and 1D-tiled-thin1
 * array modes are decoded; pipe_config is always copied.
 */
4690 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4691 uint64_t tiling_flags)
4693 /* Fill GFX8 params */
4694 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4695 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
/* Extract the macro-tile geometry encoded in the BO tiling flags. */
4697 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4698 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4699 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4700 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4701 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4703 /* XXX fix me for VI */
4704 tiling_info->gfx8.num_banks = num_banks;
4705 tiling_info->gfx8.array_mode =
4706 DC_ARRAY_2D_TILED_THIN1;
4707 tiling_info->gfx8.tile_split = tile_split;
4708 tiling_info->gfx8.bank_width = bankw;
4709 tiling_info->gfx8.bank_height = bankh;
4710 tiling_info->gfx8.tile_aspect = mtaspect;
4711 tiling_info->gfx8.tile_mode =
4712 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4713 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4714 == DC_ARRAY_1D_TILED_THIN1) {
4715 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
/* Pipe config is meaningful regardless of the array mode. */
4718 tiling_info->gfx8.pipe_config =
4719 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
/*
 * Populate the GFX9+ tiling info from the device's decoded GB_ADDR_CONFIG
 * fields (pipes, banks, interleave, SEs, RBs).  num_pkrs only exists on
 * GC 10.3.0 and later, so it is gated on the GC HW IP version.
 */
4723 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4724 union dc_tiling_info *tiling_info)
4726 tiling_info->gfx9.num_pipes =
4727 adev->gfx.config.gb_addr_config_fields.num_pipes;
4728 tiling_info->gfx9.num_banks =
4729 adev->gfx.config.gb_addr_config_fields.num_banks;
4730 tiling_info->gfx9.pipe_interleave =
4731 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4732 tiling_info->gfx9.num_shader_engines =
4733 adev->gfx.config.gb_addr_config_fields.num_se;
4734 tiling_info->gfx9.max_compressed_frags =
4735 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4736 tiling_info->gfx9.num_rb_per_se =
4737 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4738 tiling_info->gfx9.shaderEnable = 1;
4739 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4740 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
/*
 * Ask DC whether the requested DCC (delta color compression) configuration
 * is supported for this surface.  Rejects video (YUV) formats outright,
 * bails if the DC caps callback is absent, and checks the surface's
 * independent-64B-block setting against what the hardware reports.
 */
4744 validate_dcc(struct amdgpu_device *adev,
4745 const enum surface_pixel_format format,
4746 const enum dc_rotation_angle rotation,
4747 const union dc_tiling_info *tiling_info,
4748 const struct dc_plane_dcc_param *dcc,
4749 const struct dc_plane_address *address,
4750 const struct plane_size *plane_size)
4752 struct dc *dc = adev->dm.dc;
4753 struct dc_dcc_surface_param input;
4754 struct dc_surface_dcc_cap output;
4756 memset(&input, 0, sizeof(input));
4757 memset(&output, 0, sizeof(output));
/* DCC validation only applies to RGB formats with a caps callback. */
4762 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4763 !dc->cap_funcs.get_dcc_compression_cap)
4766 input.format = format;
4767 input.surface_size.width = plane_size->surface_size.width;
4768 input.surface_size.height = plane_size->surface_size.height;
4769 input.swizzle_mode = tiling_info->gfx9.swizzle;
/* Scan direction follows the plane rotation (0/180 = horizontal). */
4771 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4772 input.scan = SCAN_DIRECTION_HORIZONTAL;
4773 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4774 input.scan = SCAN_DIRECTION_VERTICAL;
4776 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4779 if (!output.capable)
/* Hardware requiring independent 64B blocks must not get a surface without them. */
4782 if (dcc->independent_64b_blks == 0 &&
4783 output.grph.rgb.independent_64b_blks != 0)
/* True when the AMD format modifier has its DCC bit set. */
4790 modifier_has_dcc(uint64_t modifier)
4792 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
/*
 * Extract the GFX9 swizzle mode from an AMD format modifier; LINEAR is
 * special-cased (its swizzle is returned by the elided early-return path).
 */
4796 modifier_gfx9_swizzle_mode(uint64_t modifier)
4798 if (modifier == DRM_FORMAT_MOD_LINEAR)
4801 return AMD_FMT_MOD_GET(TILE, modifier);
/*
 * drm_mode_config_funcs.get_format_info hook: resolve the format info for
 * a framebuffer creation request, taking the modifier into account so
 * DCC-retile layouts report their extra metadata planes.
 */
static const struct drm_format_info *
4805 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4807 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
/*
 * Fill GFX9+ tiling info, then override the pipe/bank/packer counts with
 * the values encoded in the AMD format modifier (if it is one), so the
 * layout matches what the producer of the buffer used rather than this
 * device's native config.
 */
4811 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4812 union dc_tiling_info *tiling_info,
4815 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4816 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4817 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
/* At most 16 pipes are representable; the rest count as shader engines. */
4818 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4820 fill_gfx9_tiling_info_from_device(adev, tiling_info);
/* Non-AMD modifiers (e.g. LINEAR) keep the device defaults. */
4822 if (!IS_AMD_FMT_MOD(modifier))
4825 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4826 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4828 if (adev->family >= AMDGPU_FAMILY_NV) {
4829 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4831 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4833 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
/*
 * Low two bits of the GFX9 swizzle mode: the micro-tile flavor
 * (Z = depth, S = standard, D = display).
 */
enum dm_micro_swizzle {
4838 MICRO_SWIZZLE_Z = 0,
4839 MICRO_SWIZZLE_S = 1,
4840 MICRO_SWIZZLE_D = 2,
/*
 * drm_plane_funcs.format_mod_supported hook: decide whether this plane can
 * scan out the given format/modifier pair.  LINEAR/INVALID are always
 * accepted, the modifier must be on the plane's advertised list, and
 * D-swizzle and DCC combinations are restricted by bpp and plane count.
 */
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4848 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4849 const struct drm_format_info *info = drm_format_info(format);
4852 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
/*
4858 * We always have to allow these modifiers:
4859 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4860 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
*/
4862 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4863 modifier == DRM_FORMAT_MOD_INVALID) {
4867 /* Check that the modifier is on the list of the plane's supported modifiers. */
4868 for (i = 0; i < plane->modifier_count; i++) {
4869 if (modifier == plane->modifiers[i])
4872 if (i == plane->modifier_count)
/*
4876 * For D swizzle the canonical modifier depends on the bpp, so check
*/
4879 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4880 adev->family >= AMDGPU_FAMILY_NV) {
4881 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4885 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4889 if (modifier_has_dcc(modifier)) {
4890 /* Per radeonsi comments 16/64 bpp are more complicated. */
4891 if (info->cpp[0] != 4)
/*
4893 * We support multi-planar formats, but not when combined with
4894 * additional DCC metadata planes.
*/
4895 if (info->num_planes > 1)
/*
 * Append @mod to the dynamically sized modifier list, doubling the kmalloc'd
 * buffer when full.  NOTE(review): the kmalloc-failure handling and the
 * pointer/capacity updates live in lines elided from this excerpt — confirm
 * against the full file before changing.
 */
4903 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4908 if (*cap - *size < 1) {
4909 uint64_t new_cap = *cap * 2;
4910 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
/* Preserve the existing entries in the enlarged buffer. */
4918 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4924 (*mods)[*size] = mod;
/*
 * Build the list of format modifiers supported on GFX9 (Vega/Raven).
 * XOR-bit budgets are derived from the GB_ADDR_CONFIG pipe/SE/bank counts.
 * DCC variants are only offered on the Raven family (the display-capable
 * APUs); DCC_CONSTANT_ENCODE additionally requires Raven2 or later.
 * Modifiers are emitted best-first: DCC, then DCC-retile, then plain
 * tiled, then legacy 64K_D/64K_S.
 */
4929 add_gfx9_modifiers(const struct amdgpu_device *adev,
4930 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4932 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4933 int pipe_xor_bits = min(8, pipes +
4934 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4935 int bank_xor_bits = min(8 - pipe_xor_bits,
4936 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4937 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4938 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4941 if (adev->family == AMDGPU_FAMILY_RV) {
4942 /* Raven2 and later */
4943 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
/*
4946 * No _D DCC swizzles yet because we only allow 32bpp, which
4947 * doesn't support _D on DCN
*/
4950 if (has_constant_encode) {
4951 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4952 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4953 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4954 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4955 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4956 AMD_FMT_MOD_SET(DCC, 1) |
4957 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4958 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4959 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
/* Same DCC layout without constant-encode for original Raven. */
4962 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4963 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4964 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4965 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4966 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4967 AMD_FMT_MOD_SET(DCC, 1) |
4968 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4969 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4970 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
/* DCC-retile variants additionally encode the RB/pipe layout. */
4972 if (has_constant_encode) {
4973 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4974 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4975 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4976 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4977 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4978 AMD_FMT_MOD_SET(DCC, 1) |
4979 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4980 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4981 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4983 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4984 AMD_FMT_MOD_SET(RB, rb) |
4985 AMD_FMT_MOD_SET(PIPE, pipes));
4988 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4989 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4990 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4991 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4992 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4993 AMD_FMT_MOD_SET(DCC, 1) |
4994 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4995 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4996 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4997 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4998 AMD_FMT_MOD_SET(RB, rb) |
4999 AMD_FMT_MOD_SET(PIPE, pipes));
/*
5003 * Only supported for 64bpp on Raven, will be filtered on format in
5004 * dm_plane_format_mod_supported.
*/
5006 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5007 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5008 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5009 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5010 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5012 if (adev->family == AMDGPU_FAMILY_RV) {
5013 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5014 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5015 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5016 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5017 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
/*
5021 * Only supported for 64bpp on Raven, will be filtered on format in
5022 * dm_plane_format_mod_supported.
*/
5024 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5025 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5026 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5028 if (adev->family == AMDGPU_FAMILY_RV) {
5029 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5030 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5031 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/*
 * Build the modifier list for GFX10.1 (Navi1x): 64K_R_X with and without
 * DCC / DCC-retile, then plain R_X and S_X, then the legacy GFX9-version
 * 64K_D / 64K_S fallbacks.  Emitted best-first.
 */
5036 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5037 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5039 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5041 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5042 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5043 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5044 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5045 AMD_FMT_MOD_SET(DCC, 1) |
5046 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5047 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5048 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
/* DCC with retile surface for CPU/non-display access. */
5050 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5051 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5052 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5053 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5054 AMD_FMT_MOD_SET(DCC, 1) |
5055 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5056 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5057 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5058 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5060 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5061 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5062 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5063 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5065 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5066 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5067 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5068 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5071 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5072 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5073 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5074 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5076 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5077 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5078 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/*
 * Build the modifier list for GFX10.3+ (RB+: Sienna Cichlid and other
 * Navi2x parts).  These encode PACKERS in addition to PIPE_XOR_BITS and
 * offer both 64B+128B and 128B-only independent-block DCC layouts, each
 * also in a retile variant, before the plain tiled and legacy fallbacks.
 */
5082 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5083 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5085 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5086 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5088 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5089 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5090 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5091 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5092 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5093 AMD_FMT_MOD_SET(DCC, 1) |
5094 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5095 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5096 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5097 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5099 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5100 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5101 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5102 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5103 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5104 AMD_FMT_MOD_SET(DCC, 1) |
5105 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5106 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5107 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
/* Retile variants of the two DCC layouts above. */
5109 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5111 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5112 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5113 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5114 AMD_FMT_MOD_SET(DCC, 1) |
5115 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5116 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5117 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5118 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5119 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5121 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5122 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5123 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5124 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5125 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5126 AMD_FMT_MOD_SET(DCC, 1) |
5127 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5128 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5129 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5130 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
/* Non-DCC tiled layouts. */
5132 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5133 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5134 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5135 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5136 AMD_FMT_MOD_SET(PACKERS, pkrs));
5138 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5139 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5140 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5141 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5142 AMD_FMT_MOD_SET(PACKERS, pkrs));
5144 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5145 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5147 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5149 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5150 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5151 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/*
 * Allocate and fill the modifier list advertised for a plane.  Pre-GFX9
 * parts advertise none; cursor planes get only LINEAR.  The per-family
 * helpers append their entries, LINEAR is always added, and INVALID
 * terminates the list.  *mods may be NULL on allocation failure.
 */
5155 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5157 uint64_t size = 0, capacity = 128;
5160 /* We have not hooked up any pre-GFX9 modifiers. */
5161 if (adev->family < AMDGPU_FAMILY_AI)
5164 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5166 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5167 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5168 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5169 return *mods ? 0 : -ENOMEM;
5172 switch (adev->family) {
5173 case AMDGPU_FAMILY_AI:
5174 case AMDGPU_FAMILY_RV:
5175 add_gfx9_modifiers(adev, mods, &size, &capacity);
5177 case AMDGPU_FAMILY_NV:
5178 case AMDGPU_FAMILY_VGH:
5179 case AMDGPU_FAMILY_YC:
5180 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5181 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5183 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5187 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5189 /* INVALID marks the end of the list. */
5190 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
/*
 * Derive GFX9+ tiling, DCC parameters and the DCC metadata address from
 * the framebuffer's format modifier, then validate the DCC configuration
 * with DC.  force_disable_dcc suppresses DCC even when the modifier
 * requests it.  The metadata plane is fb plane 1 (offsets/pitches[1]).
 */
5199 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5200 const struct amdgpu_framebuffer *afb,
5201 const enum surface_pixel_format format,
5202 const enum dc_rotation_angle rotation,
5203 const struct plane_size *plane_size,
5204 union dc_tiling_info *tiling_info,
5205 struct dc_plane_dcc_param *dcc,
5206 struct dc_plane_address *address,
5207 const bool force_disable_dcc)
5209 const uint64_t modifier = afb->base.modifier;
5212 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5213 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5215 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5216 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5217 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5218 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5221 dcc->meta_pitch = afb->base.pitches[1];
5222 dcc->independent_64b_blks = independent_64b_blks;
/* RB+ hardware distinguishes 64B/128B independent-block combinations. */
5223 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5224 if (independent_64b_blks && independent_128b_blks)
5225 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5226 else if (independent_128b_blks)
5227 dcc->dcc_ind_blk = hubp_ind_block_128b;
5228 else if (independent_64b_blks && !independent_128b_blks)
5229 dcc->dcc_ind_blk = hubp_ind_block_64b;
5231 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5233 if (independent_64b_blks)
5234 dcc->dcc_ind_blk = hubp_ind_block_64b;
5236 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5239 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5240 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5243 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5245 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
/*
 * Fill the DC buffer description (size, pitch, tiling, DCC, addresses) for
 * a framebuffer.  RGB formats use the single GRAPHICS address; YUV formats
 * use the progressive-video luma/chroma address pair with fb plane 1 as
 * chroma.  GFX9+ derives tiling/DCC from the modifier, older ASICs from
 * the legacy tiling flags.
 */
5251 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5252 const struct amdgpu_framebuffer *afb,
5253 const enum surface_pixel_format format,
5254 const enum dc_rotation_angle rotation,
5255 const uint64_t tiling_flags,
5256 union dc_tiling_info *tiling_info,
5257 struct plane_size *plane_size,
5258 struct dc_plane_dcc_param *dcc,
5259 struct dc_plane_address *address,
5261 bool force_disable_dcc)
5263 const struct drm_framebuffer *fb = &afb->base;
/* Start from a clean slate: callers rely on untouched fields being zero. */
5266 memset(tiling_info, 0, sizeof(*tiling_info));
5267 memset(plane_size, 0, sizeof(*plane_size));
5268 memset(dcc, 0, sizeof(*dcc));
5269 memset(address, 0, sizeof(*address));
5271 address->tmz_surface = tmz_surface;
5273 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5274 uint64_t addr = afb->address + fb->offsets[0];
5276 plane_size->surface_size.x = 0;
5277 plane_size->surface_size.y = 0;
5278 plane_size->surface_size.width = fb->width;
5279 plane_size->surface_size.height = fb->height;
/* DC pitch is in pixels, DRM pitch is in bytes. */
5280 plane_size->surface_pitch =
5281 fb->pitches[0] / fb->format->cpp[0];
5283 address->type = PLN_ADDR_TYPE_GRAPHICS;
5284 address->grph.addr.low_part = lower_32_bits(addr);
5285 address->grph.addr.high_part = upper_32_bits(addr);
5286 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5287 uint64_t luma_addr = afb->address + fb->offsets[0];
5288 uint64_t chroma_addr = afb->address + fb->offsets[1];
5290 plane_size->surface_size.x = 0;
5291 plane_size->surface_size.y = 0;
5292 plane_size->surface_size.width = fb->width;
5293 plane_size->surface_size.height = fb->height;
5294 plane_size->surface_pitch =
5295 fb->pitches[0] / fb->format->cpp[0];
5297 plane_size->chroma_size.x = 0;
5298 plane_size->chroma_size.y = 0;
5299 /* TODO: set these based on surface format */
5300 plane_size->chroma_size.width = fb->width / 2;
5301 plane_size->chroma_size.height = fb->height / 2;
5303 plane_size->chroma_pitch =
5304 fb->pitches[1] / fb->format->cpp[1];
5306 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5307 address->video_progressive.luma_addr.low_part =
5308 lower_32_bits(luma_addr);
5309 address->video_progressive.luma_addr.high_part =
5310 upper_32_bits(luma_addr);
5311 address->video_progressive.chroma_addr.low_part =
5312 lower_32_bits(chroma_addr);
5313 address->video_progressive.chroma_addr.high_part =
5314 upper_32_bits(chroma_addr);
/* GFX9+ (family AI and newer) uses format modifiers; older uses flags. */
5317 if (adev->family >= AMDGPU_FAMILY_AI) {
5318 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5319 rotation, plane_size,
5326 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
/*
 * Derive DC blending parameters from DRM plane state.  Only overlay
 * planes blend: per-pixel alpha requires PREMULTI blend mode and an
 * alpha-capable format; global (plane) alpha is enabled when the 16-bit
 * DRM alpha is below opaque and is scaled down to 8 bits for DC.
 */
5333 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5334 bool *per_pixel_alpha, bool *global_alpha,
5335 int *global_alpha_value)
5337 *per_pixel_alpha = false;
5338 *global_alpha = false;
5339 *global_alpha_value = 0xff;
/* Primary/cursor planes keep the opaque defaults. */
5341 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5344 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5345 static const uint32_t alpha_formats[] = {
5346 DRM_FORMAT_ARGB8888,
5347 DRM_FORMAT_RGBA8888,
5348 DRM_FORMAT_ABGR8888,
5350 uint32_t format = plane_state->fb->format->format;
5353 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5354 if (format == alpha_formats[i]) {
5355 *per_pixel_alpha = true;
/* DRM alpha is 16-bit (0xffff == opaque); DC wants 8-bit. */
5361 if (plane_state->alpha < 0xffff) {
5362 *global_alpha = true;
5363 *global_alpha_value = plane_state->alpha >> 8;
/*
 * Map the DRM color encoding/range properties to a DC color space.
 * RGB formats always report SRGB; YUV formats select BT.601/709/2020
 * full or limited range from the plane state.
 */
5368 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5369 const enum surface_pixel_format format,
5370 enum dc_color_space *color_space)
5374 *color_space = COLOR_SPACE_SRGB;
5376 /* DRM color properties only affect non-RGB formats. */
5377 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5380 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5382 switch (plane_state->color_encoding) {
5383 case DRM_COLOR_YCBCR_BT601:
5385 *color_space = COLOR_SPACE_YCBCR601;
5387 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5390 case DRM_COLOR_YCBCR_BT709:
5392 *color_space = COLOR_SPACE_YCBCR709;
5394 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5397 case DRM_COLOR_YCBCR_BT2020:
5399 *color_space = COLOR_SPACE_2020_YCBCR;
/*
 * Convert a DRM plane state into a complete struct dc_plane_info plus the
 * surface address: pixel format mapping, rotation, color space, buffer
 * attributes (tiling/DCC/address) and blending.  Unsupported fourccs are
 * rejected (via the elided error path after the format log).
 */
5412 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5413 const struct drm_plane_state *plane_state,
5414 const uint64_t tiling_flags,
5415 struct dc_plane_info *plane_info,
5416 struct dc_plane_address *address,
5418 bool force_disable_dcc)
5420 const struct drm_framebuffer *fb = plane_state->fb;
5421 const struct amdgpu_framebuffer *afb =
5422 to_amdgpu_framebuffer(plane_state->fb);
5425 memset(plane_info, 0, sizeof(*plane_info));
/* DRM fourcc -> DC surface pixel format. */
5427 switch (fb->format->format) {
5429 plane_info->format =
5430 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5432 case DRM_FORMAT_RGB565:
5433 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5435 case DRM_FORMAT_XRGB8888:
5436 case DRM_FORMAT_ARGB8888:
5437 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5439 case DRM_FORMAT_XRGB2101010:
5440 case DRM_FORMAT_ARGB2101010:
5441 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5443 case DRM_FORMAT_XBGR2101010:
5444 case DRM_FORMAT_ABGR2101010:
5445 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5447 case DRM_FORMAT_XBGR8888:
5448 case DRM_FORMAT_ABGR8888:
5449 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5451 case DRM_FORMAT_NV21:
5452 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5454 case DRM_FORMAT_NV12:
5455 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5457 case DRM_FORMAT_P010:
5458 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5460 case DRM_FORMAT_XRGB16161616F:
5461 case DRM_FORMAT_ARGB16161616F:
5462 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5464 case DRM_FORMAT_XBGR16161616F:
5465 case DRM_FORMAT_ABGR16161616F:
5466 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5468 case DRM_FORMAT_XRGB16161616:
5469 case DRM_FORMAT_ARGB16161616:
5470 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5472 case DRM_FORMAT_XBGR16161616:
5473 case DRM_FORMAT_ABGR16161616:
5474 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5478 "Unsupported screen format %p4cc\n",
5479 &fb->format->format);
/* DRM rotation property -> DC rotation angle (default 0). */
5483 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5484 case DRM_MODE_ROTATE_0:
5485 plane_info->rotation = ROTATION_ANGLE_0;
5487 case DRM_MODE_ROTATE_90:
5488 plane_info->rotation = ROTATION_ANGLE_90;
5490 case DRM_MODE_ROTATE_180:
5491 plane_info->rotation = ROTATION_ANGLE_180;
5493 case DRM_MODE_ROTATE_270:
5494 plane_info->rotation = ROTATION_ANGLE_270;
5497 plane_info->rotation = ROTATION_ANGLE_0;
5501 plane_info->visible = true;
5502 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5504 plane_info->layer_index = 0;
5506 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5507 &plane_info->color_space);
5511 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5512 plane_info->rotation, tiling_flags,
5513 &plane_info->tiling_info,
5514 &plane_info->plane_size,
5515 &plane_info->dcc, address, tmz_surface,
5520 fill_blending_from_plane_state(
5521 plane_state, &plane_info->per_pixel_alpha,
5522 &plane_info->global_alpha, &plane_info->global_alpha_value);
/*
 * Populate a dc_plane_state from DRM plane + CRTC state: scaling rects,
 * plane info/address (DCC is force-disabled during Raven suspend), and
 * finally the input color management transfer function.
 */
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5528 struct dc_plane_state *dc_plane_state,
5529 struct drm_plane_state *plane_state,
5530 struct drm_crtc_state *crtc_state)
5532 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5533 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5534 struct dc_scaling_info scaling_info;
5535 struct dc_plane_info plane_info;
5537 bool force_disable_dcc = false;
5539 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5543 dc_plane_state->src_rect = scaling_info.src_rect;
5544 dc_plane_state->dst_rect = scaling_info.dst_rect;
5545 dc_plane_state->clip_rect = scaling_info.clip_rect;
5546 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
/* Raven must not use DCC while suspending. */
5548 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5549 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5552 &dc_plane_state->address,
/* NOTE(review): 'format' is assigned twice below (5558 and 5560) — the
 * second assignment is redundant; harmless, but a cleanup candidate. */
5558 dc_plane_state->format = plane_info.format;
5559 dc_plane_state->color_space = plane_info.color_space;
5560 dc_plane_state->format = plane_info.format;
5561 dc_plane_state->plane_size = plane_info.plane_size;
5562 dc_plane_state->rotation = plane_info.rotation;
5563 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5564 dc_plane_state->stereo_format = plane_info.stereo_format;
5565 dc_plane_state->tiling_info = plane_info.tiling_info;
5566 dc_plane_state->visible = plane_info.visible;
5567 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5568 dc_plane_state->global_alpha = plane_info.global_alpha;
5569 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5570 dc_plane_state->dcc = plane_info.dcc;
5571 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5572 dc_plane_state->flip_int_enabled = true;
/*
5575 * Always set input transfer function, since plane state is refreshed
*/
5578 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
/*
 * Compute the stream's src (viewport) and dst (addressable area) rects
 * from the mode and the connector's scaling property: full-screen by
 * default, aspect-preserving letterbox for RMX_ASPECT/RMX_OFF, centered
 * 1:1 for RMX_CENTER, with optional underscan borders applied last.
 */
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5586 const struct dm_connector_state *dm_state,
5587 struct dc_stream_state *stream)
5589 enum amdgpu_rmx_type rmx_type;
5591 struct rect src = { 0 }; /* viewport in composition space*/
5592 struct rect dst = { 0 }; /* stream addressable area */
5594 /* no mode. nothing to be done */
5598 /* Full screen scaling by default */
5599 src.width = mode->hdisplay;
5600 src.height = mode->vdisplay;
5601 dst.width = stream->timing.h_addressable;
5602 dst.height = stream->timing.v_addressable;
5605 rmx_type = dm_state->scaling;
5606 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
/* Shrink whichever destination axis over-scales to keep aspect ratio. */
5607 if (src.width * dst.height <
5608 src.height * dst.width) {
5609 /* height needs less upscaling/more downscaling */
5610 dst.width = src.width *
5611 dst.height / src.height;
5613 /* width needs less upscaling/more downscaling */
5614 dst.height = src.height *
5615 dst.width / src.width;
5617 } else if (rmx_type == RMX_CENTER) {
/* Center the destination rect within the addressable area. */
5621 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5622 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5624 if (dm_state->underscan_enable) {
5625 dst.x += dm_state->underscan_hborder / 2;
5626 dst.y += dm_state->underscan_vborder / 2;
5627 dst.width -= dm_state->underscan_hborder;
5628 dst.height -= dm_state->underscan_vborder;
5635 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5636 dst.x, dst.y, dst.width, dst.height);
/*
 * Determine the color depth to drive the sink at: start from the EDID bpc
 * (YCbCr 4:2:0 deep-color caps when is_y420), default to 8, cap by the
 * user-requested max_bpc, round down to even, and map bpc to the DC
 * color-depth enum (via the elided switch at the end).
 */
static enum dc_color_depth
5641 convert_color_depth_from_display_info(const struct drm_connector *connector,
5642 bool is_y420, int requested_bpc)
5649 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5650 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5652 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5654 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5657 bpc = (uint8_t)connector->display_info.bpc;
5658 /* Assume 8 bpc by default if no bpc is specified. */
5659 bpc = bpc ? bpc : 8;
5662 if (requested_bpc > 0) {
/*
5664 * Cap display bpc based on the user requested value.
5666 * The value for state->max_bpc may not correctly updated
5667 * depending on when the connector gets added to the state
5668 * or if this was called outside of atomic check, so it
5669 * can't be used directly.
*/
5671 bpc = min_t(u8, bpc, requested_bpc);
5673 /* Round down to the nearest even number. */
5674 bpc = bpc - (bpc & 1);
/*
5680 * Temporary Work around, DRM doesn't parse color depth for
5681 * EDID revision before 1.4
5682 * TODO: Fix edid parsing
*/
5684 return COLOR_DEPTH_888;
5686 return COLOR_DEPTH_666;
5688 return COLOR_DEPTH_888;
5690 return COLOR_DEPTH_101010;
5692 return COLOR_DEPTH_121212;
5694 return COLOR_DEPTH_141414;
5696 return COLOR_DEPTH_161616;
5698 return COLOR_DEPTH_UNDEFINED;
/* Cast the DRM picture aspect ratio straight to DC's enum (same values). */
static enum dc_aspect_ratio
5703 get_aspect_ratio(const struct drm_display_mode *mode_in)
5705 /* 1-1 mapping, since both enums follow the HDMI spec. */
5706 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
/*
 * Pick the output color space from the stream timing: RGB encodings get
 * SRGB; YCbCr encodings choose BT.709 above the 27.03 MHz HD/SD split
 * and BT.601 below, with the limited-range variants for Y_ONLY timings.
 */
static enum dc_color_space
5710 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5712 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5714 switch (dc_crtc_timing->pixel_encoding) {
5715 case PIXEL_ENCODING_YCBCR422:
5716 case PIXEL_ENCODING_YCBCR444:
5717 case PIXEL_ENCODING_YCBCR420:
/*
5720 * 27030khz is the separation point between HDTV and SDTV
5721 * according to HDMI spec, we use YCbCr709 and YCbCr601
*/
5724 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5725 if (dc_crtc_timing->flags.Y_ONLY)
5727 COLOR_SPACE_YCBCR709_LIMITED;
5729 color_space = COLOR_SPACE_YCBCR709;
5731 if (dc_crtc_timing->flags.Y_ONLY)
5733 COLOR_SPACE_YCBCR601_LIMITED;
5735 color_space = COLOR_SPACE_YCBCR601;
5740 case PIXEL_ENCODING_RGB:
5741 color_space = COLOR_SPACE_SRGB;
/*
 * Reduce the stream's color depth until the resulting TMDS clock fits
 * within the sink's advertised max_tmds_clock.
 *
 * Starting from the currently-selected depth, the pixel clock is scaled
 * by the HDMI per-depth bandwidth factor (30/24, 36/24, 48/24) and
 * compared against the display's limit; on overflow the next lower depth
 * is tried. Returns true if a depth fit (timing_out updated), false if
 * even the lowest valid HDMI depth could not fit or the depth is not an
 * HDMI-valid one.
 */
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		/* pix_clk_100hz is in 100 Hz units; /10 yields kHz to match max_tmds_clock */
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
/*
 * Populate a DC stream's CRTC timing from a DRM display mode.
 *
 * Chooses the pixel encoding (forcing YCbCr 4:2:0 where the mode or user
 * requires it), converts color depth, copies either the freesync base
 * timing or the computed crtc_* timing fields, and finally retries the
 * color-depth fit against the sink's TMDS limit, falling back to 4:2:0
 * when 4:4:4/RGB cannot fit.
 *
 * @old_stream: when non-NULL, its VIC and sync polarities are reused so a
 *              scaled refresh-compatible modeset does not retrain.
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		/* user forced YUV420 through debugfs */
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		/* Keep old VIC/polarities so a refresh-only change is seamless */
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/* For HDMI take the VIC/HDMI-VIC from the infoframe helpers */
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
		/* Freesync video modes use the raw (non-crtc_) timing fields */
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		/*
		 * If the chosen depth doesn't fit the TMDS clock, retry once
		 * with YCbCr 4:2:0 (halves the effective clock) when allowed.
		 */
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
/*
 * Copy audio capabilities parsed from the sink's EDID into the stream's
 * audio_info: IDs, display name, per-mode format/channel/rate/size data
 * (CEA revision >= 3 only), speaker allocation, and progressive-mode
 * latencies reported by the connector.
 */
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	/* Short Audio Descriptors are only defined from CEA-861 rev 3 on */
	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
5933 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5934 struct drm_display_mode *dst_mode)
5936 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5937 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5938 dst_mode->crtc_clock = src_mode->crtc_clock;
5939 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5940 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5941 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5942 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5943 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5944 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5945 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5946 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5947 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5948 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5949 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5953 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5954 const struct drm_display_mode *native_mode,
5957 if (scale_enabled) {
5958 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5959 } else if (native_mode->clock == drm_mode->clock &&
5960 native_mode->htotal == drm_mode->htotal &&
5961 native_mode->vtotal == drm_mode->vtotal) {
5962 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5964 /* no scaling nor amdgpu inserted, no need to patch */
/*
 * Create a virtual (fake) DC sink on the connector's link, used when no
 * physical sink is attached so a stream can still be constructed.
 * Returns the new sink or NULL on allocation failure; the caller owns
 * the reference and must dc_sink_release() it.
 */
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	/* Mark it virtual so DC treats it as a headless/fake endpoint */
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}
/*
 * Program the CRTC-reset trigger for a stream taking part in multi-
 * display synchronization: the reset event edge follows the master
 * stream's VSYNC polarity, with a next-pixel delay.
 * No-op if triggered reset is not enabled on this stream.
 */
static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}
/*
 * Select the multisync master stream: the enabled stream with the
 * highest refresh rate (pix_clk / (h_total * v_total)) becomes master,
 * and every other enabled stream's reset event source is pointed at it.
 */
static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			/* pix_clk_100hz * 100 => Hz; divided by total pixels => frames/s */
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}
/*
 * Enable per-frame CRTC synchronization across all streams in the DC
 * state: pick a master via set_master_stream() and program each
 * stream's trigger parameters. Nothing to do with fewer than 2 streams.
 */
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count ; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Reset the stream's DSC flag and, for DP SST sinks, parse the sink's
 * DSC DPCD capabilities (basic + branch decoder) into @dsc_caps.
 */
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
			    struct dc_sink *sink, struct dc_stream_state *stream,
			    struct dsc_dec_dpcd_caps *dsc_caps)
{
	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      dsc_caps);
	}
}
/*
 * Decide whether to enable DSC on an SST DP stream and compute its
 * configuration against the available link bandwidth, honoring any
 * panel-specific bpp limit and the user's debugfs force/num-slice/bpp
 * overrides.
 */
static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
					struct dc_sink *sink, struct dc_stream_state *stream,
					struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	uint32_t link_bandwidth_kbps;
	uint32_t max_dsc_target_bpp_limit_override = 0;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
						     dc_link_get_link_cap(aconnector->dc_link));

	/* Panel quirk: some eDP panels cap the DSC target bpp */
	if (stream->link && stream->link->local_sink)
		max_dsc_target_bpp_limit_override =
			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;

	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {

		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
					  dsc_caps,
					  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
					  max_dsc_target_bpp_limit_override,
					  link_bandwidth_kbps,
					  &stream->timing,
					  &stream->timing.dsc_cfg)) {
			stream->timing.flags.DSC = 1;
			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
#endif /* CONFIG_DRM_AMD_DC_DCN */
6116 * DOC: FreeSync Video
6118 * When a userspace application wants to play a video, the content follows a
6119 * standard format definition that usually specifies the FPS for that format.
6120 * The below list illustrates some video format and the expected FPS,
6123 * - TV/NTSC (23.976 FPS)
6126 * - TV/NTSC (29.97 FPS)
6127 * - TV/NTSC (30 FPS)
6128 * - Cinema HFR (48 FPS)
6130 * - Commonly used (60 FPS)
6131 * - Multiples of 24 (48,72,96,120 FPS)
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
6136 * rate. Such a switch will happen seamlessly, without screen blanking or
6137 * reprogramming of the output in any other way. If the userspace requests a
6138 * modesetting change compatible with FreeSync modes that only differ in the
6139 * refresh rate, DC will skip the full update and avoid blink during the
6140 * transition. For example, the video player can change the modesetting from
6141 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6142 * causing any display blink. This same concept can be applied to a mode
/*
 * Return the mode with the highest refresh rate at the preferred mode's
 * resolution, caching the result in aconnector->freesync_vid_base.
 *
 * @use_probed_modes: search base.probed_modes instead of base.modes.
 * Returns NULL only when the connector has no modes at all.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
				      &aconnector->base.probed_modes :
				      &aconnector->base.modes;

	/* Cached result from a previous call (clock != 0 means valid) */
	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

	/* Find the preferred mode */
	list_for_each_entry (m, list_head, head) {
		if (m->type & DRM_MODE_TYPE_PREFERRED) {
			m_pref = m;
			break;
		}
	}

	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fallback to first entry */
		m_pref = list_first_entry_or_null(
			&aconnector->base.modes, struct drm_display_mode, head);
		if (!m_pref) {
			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
			return NULL;
		}
	}

	highest_refresh = drm_mode_vrefresh(m_pref);

	/*
	 * Find the mode with highest refresh rate with same resolution.
	 * For some monitors, preferred mode is not the mode with highest
	 * supported refresh rate.
	 */
	list_for_each_entry (m, list_head, head) {
		current_refresh = drm_mode_vrefresh(m);

		if (m->hdisplay == m_pref->hdisplay &&
		    m->vdisplay == m_pref->vdisplay &&
		    highest_refresh < current_refresh) {
			highest_refresh = current_refresh;
			m_pref = m;
		}
	}

	aconnector->freesync_vid_base = *m_pref;
	return m_pref;
}
/*
 * Check whether @mode is a FreeSync video variant of the connector's
 * base mode: identical pixel clock and horizontal timing, with the
 * vertical sync window shifted by exactly the vtotal difference (i.e.
 * only the vertical front porch is stretched).
 */
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector)
{
	struct drm_display_mode *high_mode;
	int timing_diff;

	high_mode = get_highest_refresh_rate_mode(aconnector, false);
	if (!high_mode || !mode)
		return false;

	/* Extra vtotal lines the base mode has relative to @mode */
	timing_diff = high_mode->vtotal - mode->vtotal;

	/* TODO: Multi-plane: Comparison of 2 modes */
	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
	    high_mode->hdisplay != mode->hdisplay ||
	    high_mode->vdisplay != mode->vdisplay ||
	    high_mode->hsync_start != mode->hsync_start ||
	    high_mode->hsync_end != mode->hsync_end ||
	    high_mode->htotal != mode->htotal ||
	    high_mode->hskew != mode->hskew ||
	    high_mode->vscan != mode->vscan ||
	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
	    high_mode->vsync_end - mode->vsync_end != timing_diff)
		return false;
	else
		return true;
}
/*
 * Build a DC stream for the connector's sink from a DRM mode.
 *
 * Handles the no-physical-sink case with a fake sink, resolves the
 * preferred mode, detects FreeSync video modes (reusing the cached base
 * timing to allow seamless refresh-rate switches), fills timing/audio
 * properties, applies the SST DSC policy, and configures PSR/VSC SDP
 * colorimetry when the link supports PSR.
 *
 * Returns a new stream reference (caller releases) or NULL on failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	bool recalculate_timing = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		/* No physical sink: synthesize a virtual one so the stream
		 * can still be created (headless / forced-on cases). */
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		recalculate_timing = amdgpu_freesync_vid_mode &&
				     is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			/* Swap in the cached base timing; keep the request */
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			saved_mode = mode;
			mode = *freesync_mode;
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* SST DSC determination policy */
	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
#endif

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide stream support vsc sdp colorimetry capability
		 * before building vsc info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
	}
finish:
	/* Drop our local sink reference (stream holds its own) */
	dc_sink_release(sink);

	return stream;
}
/* Tear down a CRTC: clean up the DRM core state and free the object. */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
/*
 * Free a dm_crtc_state: drop the dc_stream reference it holds, destroy
 * the embedded drm_crtc_state, then free the wrapper allocation.
 */
static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO Destroy dc_stream objects are stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}
/*
 * Reset a CRTC to a fresh zeroed software state: destroy any existing
 * state and install a newly-allocated dm_crtc_state.
 */
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}
6414 static struct drm_crtc_state *
6415 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6417 struct dm_crtc_state *state, *cur;
6419 cur = to_dm_crtc_state(crtc->state);
6421 if (WARN_ON(!crtc->state))
6424 state = kzalloc(sizeof(*state), GFP_KERNEL);
6428 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6431 state->stream = cur->stream;
6432 dc_stream_retain(state->stream);
6435 state->active_planes = cur->active_planes;
6436 state->vrr_infopacket = cur->vrr_infopacket;
6437 state->abm_level = cur->abm_level;
6438 state->vrr_supported = cur->vrr_supported;
6439 state->freesync_config = cur->freesync_config;
6440 state->cm_has_degamma = cur->cm_has_degamma;
6441 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6442 state->force_dpms_off = cur->force_dpms_off;
6443 /* TODO Duplicate dc_stream after objects are stream object is flattened */
6445 return &state->base;
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
/* Register per-CRTC debugfs entries (secure display builds only). */
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif
/*
 * Enable/disable the VUPDATE interrupt for this CRTC's OTG instance.
 * Returns 0 on success, -EBUSY if DC rejected the interrupt change.
 */
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	/* IRQ sources are laid out per-OTG; offset by the OTG instance */
	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
		      acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}
/*
 * Enable/disable the VBLANK interrupt for a CRTC.
 *
 * In VRR mode the VUPDATE interrupt is toggled alongside it. On DCN,
 * the actual enablement side effects (e.g. PSR interaction) are
 * deferred to the vblank control worker, except during GPU reset.
 * Returns 0 on success or a negative errno.
 */
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct amdgpu_display_manager *dm = &adev->dm;
	struct vblank_control_work *work;
#endif
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	/* During reset, skip the deferred work — state is being rebuilt */
	if (amdgpu_in_reset(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dm->vblank_control_workqueue) {
		/* GFP_ATOMIC: may be called from a non-sleepable context */
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, vblank_control_worker);
		work->dm = dm;
		work->acrtc = acrtc;
		work->enable = enable;

		if (acrtc_state->stream) {
			/* Keep the stream alive until the worker runs */
			dc_stream_retain(acrtc_state->stream);
			work->stream = acrtc_state->stream;
		}

		queue_work(dm->vblank_control_workqueue, &work->work);
	}
#endif

	return 0;
}
/* drm_crtc_funcs.enable_vblank hook: turn the vblank interrupt on. */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
/* drm_crtc_funcs.disable_vblank hook: turn the vblank interrupt off. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};
/*
 * Report the connector's connection status from cached sink state
 * (or the user's force setting); no hardware probing happens here.
 */
static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}
/*
 * Atomic set_property hook for DM connectors: maps the DRM scaling-mode,
 * underscan and ABM-level properties onto the dm_connector_state.
 * Returns 0 on a recognized property, -EINVAL otherwise.
 */
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		/* No change requested — nothing to update */
		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}
/*
 * Atomic get_property hook: the inverse of the set hook, reading
 * scaling-mode, underscan and ABM values back out of the
 * dm_connector_state. Returns 0 on a recognized property, -EINVAL
 * otherwise.
 */
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}
/* early_unregister hook: drop the DP AUX channel registration. */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
/*
 * Fully tear down a DM connector: MST topology manager, any backlight
 * device bound to this link, emulated and real sink references, CEC,
 * the DRM connector itself, the DDC i2c adapter and the AUX name.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	int i;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	for (i = 0; i < dm->num_of_edps; i++) {
		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
			backlight_device_unregister(dm->backlight_dev[i]);
			dm->backlight_dev[i] = NULL;
		}
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
/*
 * Reset the connector to default software state: free the old
 * dm_connector_state and install a fresh one with scaling off, no
 * underscan, max bpc of 8, and (for eDP) the module's default ABM level.
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}
/*
 * Duplicate a DM connector state for an atomic commit: kmemdup the
 * wrapper, let the DRM helper fix up the base state, then explicitly
 * copy the dm-private fields. Returns NULL on allocation failure.
 */
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}
/*
 * late_register hook: register the DP AUX channel for DP/eDP connectors
 * (its kdev only exists after connector registration) and create the
 * connector's debugfs entries. Returns 0 or the AUX registration error.
 */
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}
/* Connector function table wiring the DM hooks into the DRM core. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
/* Thin .get_modes wrapper; real work is in amdgpu_dm_connector_get_modes(). */
6821 static int get_modes(struct drm_connector *connector)
6823 return amdgpu_dm_connector_get_modes(connector);
/*
 * Build an emulated (virtual) DC sink from the connector's override EDID
 * blob. If no EDID blob is attached, force the connector OFF and clear the
 * override flag. When the connector is forced ON, dc_sink is pointed at the
 * real local sink when present, else at the emulated sink (with a refcount
 * taken via dc_sink_retain()).
 */
6826 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6828 struct dc_sink_init_data init_params = {
6829 .link = aconnector->dc_link,
6830 .sink_signal = SIGNAL_TYPE_VIRTUAL
6834 if (!aconnector->base.edid_blob_ptr) {
6835 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6836 aconnector->base.name);
6838 aconnector->base.force = DRM_FORCE_OFF;
6839 aconnector->base.override_edid = false;
6843 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6845 aconnector->edid = edid;
/* EDID total size = base block + extension blocks, each EDID_LENGTH bytes. */
6847 aconnector->dc_em_sink = dc_link_add_remote_sink(
6848 aconnector->dc_link,
6850 (edid->extensions + 1) * EDID_LENGTH,
6853 if (aconnector->base.force == DRM_FORCE_ON) {
6854 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6855 aconnector->dc_link->local_sink :
6856 aconnector->dc_em_sink;
6857 dc_sink_retain(aconnector->dc_sink);
/*
 * Handle a forced (override-EDID) connector: seed non-zero DP link caps so
 * a headless boot still gets an initial modeset, then create the emulated
 * sink from the override EDID.
 */
6861 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6863 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6866 * In case of headless boot with force on for DP managed connector
6867 * Those settings have to be != 0 to get initial modeset
6869 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6870 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6871 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6875 aconnector->base.override_edid = true;
6876 create_eml_sink(aconnector);
/*
 * Create a DC stream for the sink and validate it, retrying with
 * progressively lower bpc (step of 2, down to 6) when validation fails.
 * On DC_FAIL_ENC_VALIDATE, recurse exactly once with YCbCr420 output
 * forced (the flag is restored afterwards, bounding the recursion).
 * Returns the validated stream, or NULL (elided here) on failure.
 */
6879 static struct dc_stream_state *
6880 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6881 const struct drm_display_mode *drm_mode,
6882 const struct dm_connector_state *dm_state,
6883 const struct dc_stream_state *old_stream)
6885 struct drm_connector *connector = &aconnector->base;
6886 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6887 struct dc_stream_state *stream;
6888 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6889 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6890 enum dc_status dc_result = DC_OK;
6893 stream = create_stream_for_sink(aconnector, drm_mode,
6894 dm_state, old_stream,
6896 if (stream == NULL) {
6897 DRM_ERROR("Failed to create stream for sink!\n");
6901 dc_result = dc_validate_stream(adev->dm.dc, stream);
6903 if (dc_result != DC_OK) {
6904 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6909 dc_status_to_str(dc_result));
6911 dc_stream_release(stream);
6913 requested_bpc -= 2; /* lower bpc to retry validation */
6916 } while (stream == NULL && requested_bpc >= 6);
6918 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6919 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6921 aconnector->force_yuv420_output = true;
6922 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6923 dm_state, old_stream);
6924 aconnector->force_yuv420_output = false;
/*
 * .mode_valid hook: reject interlaced/doublescan modes outright, lazily
 * set up the emulated sink for forced connectors, then accept the mode
 * only if a DC stream for it can be created and validated.
 */
6930 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6931 struct drm_display_mode *mode)
6933 int result = MODE_ERROR;
6934 struct dc_sink *dc_sink;
6935 /* TODO: Unhardcode stream count */
6936 struct dc_stream_state *stream;
6937 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6939 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6940 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6944 * Only run this the first time mode_valid is called to initialize
6947 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6948 !aconnector->dc_em_sink)
6949 handle_edid_mgmt(aconnector);
6951 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
/* A NULL sink is only acceptable when the connector is forced on. */
6953 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6954 aconnector->base.force != DRM_FORCE_ON) {
6955 DRM_ERROR("dc_sink is NULL!\n");
6959 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6961 dc_stream_release(stream);
6966 /* TODO: error handling*/
/*
 * Translate the connector state's HDR output metadata property into a DC
 * info packet: pack a DRM (Dynamic Range & Mastering) HDMI infoframe, then
 * wrap the 26-byte static-metadata payload in the header layout required
 * for the sink type (HDMI infoframe vs. DP/eDP SDP).
 * Returns 0 on success (elided here); leaves *out zeroed when no metadata.
 */
6970 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6971 struct dc_info_packet *out)
6973 struct hdmi_drm_infoframe frame;
6974 unsigned char buf[30]; /* 26 + 4 */
6978 memset(out, 0, sizeof(*out));
6980 if (!state->hdr_output_metadata)
6983 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6987 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6991 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6995 /* Prepare the infopacket for DC. */
6996 switch (state->connector->connector_type) {
6997 case DRM_MODE_CONNECTOR_HDMIA:
6998 out->hb0 = 0x87; /* type */
6999 out->hb1 = 0x01; /* version */
7000 out->hb2 = 0x1A; /* length */
7001 out->sb[0] = buf[3]; /* checksum */
7005 case DRM_MODE_CONNECTOR_DisplayPort:
7006 case DRM_MODE_CONNECTOR_eDP:
7007 out->hb0 = 0x00; /* sdp id, zero */
7008 out->hb1 = 0x87; /* type */
7009 out->hb2 = 0x1D; /* payload len - 1 */
7010 out->hb3 = (0x13 << 2); /* sdp version */
7011 out->sb[0] = 0x01; /* version */
7012 out->sb[1] = 0x1A; /* length */
/* Copy the 26-byte payload past the 4-byte infoframe header in buf. */
7020 memcpy(&out->sb[i], &buf[4], 26);
7023 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7024 sizeof(out->sb), false);
/*
 * Connector .atomic_check: when the HDR output metadata property changed,
 * validate it by building the info packet and force a modeset on the
 * attached CRTC — but only when HDR is being entered or exited, since DC
 * treats static-metadata changes as a stream backend change.
 */
7030 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7031 struct drm_atomic_state *state)
7033 struct drm_connector_state *new_con_state =
7034 drm_atomic_get_new_connector_state(state, conn);
7035 struct drm_connector_state *old_con_state =
7036 drm_atomic_get_old_connector_state(state, conn);
7037 struct drm_crtc *crtc = new_con_state->crtc;
7038 struct drm_crtc_state *new_crtc_state;
7041 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7046 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7047 struct dc_info_packet hdr_infopacket;
7049 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7053 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7054 if (IS_ERR(new_crtc_state))
7055 return PTR_ERR(new_crtc_state);
7058 * DC considers the stream backends changed if the
7059 * static metadata changes. Forcing the modeset also
7060 * gives a simple way for userspace to switch from
7061 * 8bpc to 10bpc when setting the metadata to enter
7064 * Changing the static metadata after it's been
7065 * set is permissible, however. So only force a
7066 * modeset if we're entering or exiting HDR.
7068 new_crtc_state->mode_changed =
7069 !old_con_state->hdr_output_metadata ||
7070 !new_con_state->hdr_output_metadata;
/* Connector helper vtable: probing, mode validation and atomic checks. */
7076 static const struct drm_connector_helper_funcs
7077 amdgpu_dm_connector_helper_funcs = {
7079 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7080 * modes will be filtered by drm_mode_validate_size(), and those modes
7081 * are missing after user start lightdm. So we need to renew modes list.
7082 * in get_modes call back, not just return the modes count
7084 .get_modes = get_modes,
7085 .mode_valid = amdgpu_dm_connector_mode_valid,
7086 .atomic_check = amdgpu_dm_connector_atomic_check,
/* Intentionally empty .disable stub; body elided in this excerpt. */
7089 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
/*
 * Count non-cursor planes on the CRTC that have a framebuffer attached.
 * A plane present in plane_mask but absent from the atomic state is
 * unchanged — and therefore previously validated/enabled — so it counts.
 */
7093 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7095 struct drm_atomic_state *state = new_crtc_state->state;
7096 struct drm_plane *plane;
7099 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7100 struct drm_plane_state *new_plane_state;
7102 /* Cursor planes are "fake". */
7103 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7106 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7108 if (!new_plane_state) {
7110 * The plane is enable on the CRTC and hasn't changed
7111 * state. This means that it previously passed
7112 * validation and is therefore enabled.
7118 /* We need a framebuffer to be considered enabled. */
7119 num_active += (new_plane_state->fb != NULL);
/*
 * Cache the active-plane count in the DM CRTC state; zero when the CRTC
 * has no stream attached.
 */
7125 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7126 struct drm_crtc_state *new_crtc_state)
7128 struct dm_crtc_state *dm_new_crtc_state =
7129 to_dm_crtc_state(new_crtc_state);
7131 dm_new_crtc_state->active_planes = 0;
7133 if (!dm_new_crtc_state->stream)
7136 dm_new_crtc_state->active_planes =
7137 count_crtc_active_planes(new_crtc_state);
/*
 * CRTC .atomic_check: refresh the active-plane count, require the primary
 * plane whenever the CRTC is enabled (HW limitation; also keeps legacy
 * cursor userspace working), and validate the DC stream when one exists.
 */
7140 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7141 struct drm_atomic_state *state)
7143 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7145 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7146 struct dc *dc = adev->dm.dc;
7147 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7150 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7152 dm_update_crtc_active_planes(crtc, crtc_state);
7154 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7155 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7160 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7161 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7162 * planes are disabled, which is not supported by the hardware. And there is legacy
7163 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7165 if (crtc_state->enable &&
7166 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7167 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7171 /* In some use cases, like reset, no stream is attached */
7172 if (!dm_crtc_state->stream)
7175 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7178 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
/* .mode_fixup stub; body elided here (presumably just returns true). */
7182 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7183 const struct drm_display_mode *mode,
7184 struct drm_display_mode *adjusted_mode)
/* CRTC helper vtable. */
7189 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7190 .disable = dm_crtc_helper_disable,
7191 .atomic_check = dm_crtc_helper_atomic_check,
7192 .mode_fixup = dm_crtc_helper_mode_fixup,
7193 .get_scanout_position = amdgpu_crtc_get_scanout_position,
/* Intentionally empty encoder .disable stub; body elided in this excerpt. */
7196 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
/*
 * Map a DC color depth enum to bits-per-component. The return statements
 * are elided in this excerpt; presumably 6/8/10/12/14/16 per case —
 * TODO(review): confirm against the full file.
 */
7201 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7203 switch (display_color_depth) {
7204 case COLOR_DEPTH_666:
7206 case COLOR_DEPTH_888:
7208 case COLOR_DEPTH_101010:
7210 case COLOR_DEPTH_121212:
7212 case COLOR_DEPTH_141414:
7214 case COLOR_DEPTH_161616:
/*
 * Encoder .atomic_check for MST connectors: compute the stream PBN from
 * clock/bpp (honoring forced YUV420 and max_requested_bpc) and reserve
 * VCPI slots in the MST topology. Skipped for duplicated states, since
 * those reuse already-computed values.
 */
7222 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7223 struct drm_crtc_state *crtc_state,
7224 struct drm_connector_state *conn_state)
7226 struct drm_atomic_state *state = crtc_state->state;
7227 struct drm_connector *connector = conn_state->connector;
7228 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7229 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7230 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7231 struct drm_dp_mst_topology_mgr *mst_mgr;
7232 struct drm_dp_mst_port *mst_port;
7233 enum dc_color_depth color_depth;
7235 bool is_y420 = false;
/* Non-MST connectors (no port) or sinkless connectors need no VCPI work. */
7237 if (!aconnector->port || !aconnector->dc_sink)
7240 mst_port = aconnector->port;
7241 mst_mgr = &aconnector->mst_port->mst_mgr;
7243 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7246 if (!state->duplicated) {
7247 int max_bpc = conn_state->max_requested_bpc;
7248 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7249 aconnector->force_yuv420_output;
7250 color_depth = convert_color_depth_from_display_info(connector,
7253 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7254 clock = adjusted_mode->clock;
7255 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7257 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7260 dm_new_connector_state->pbn,
7261 dm_mst_get_pbn_divider(aconnector->dc_link));
7262 if (dm_new_connector_state->vcpi_slots < 0) {
7263 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7264 return dm_new_connector_state->vcpi_slots;
/* Encoder helper vtable (non-static: referenced from other DM files). */
7269 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7270 .disable = dm_encoder_helper_disable,
7271 .atomic_check = dm_encoder_helper_atomic_check
7274 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For every MST connector in the atomic state, look up its DC stream and
 * the DSC fairness entry computed earlier, then either store the plain
 * pbn/slot count (DSC off) or enable DSC on the MST port and store the
 * DSC-adjusted VCPI allocation.
 */
7275 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7276 struct dc_state *dc_state,
7277 struct dsc_mst_fairness_vars *vars)
7279 struct dc_stream_state *stream = NULL;
7280 struct drm_connector *connector;
7281 struct drm_connector_state *new_con_state;
7282 struct amdgpu_dm_connector *aconnector;
7283 struct dm_connector_state *dm_conn_state;
7285 int vcpi, pbn_div, pbn, slot_num = 0;
7287 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7289 aconnector = to_amdgpu_dm_connector(connector);
7291 if (!aconnector->port)
7294 if (!new_con_state || !new_con_state->crtc)
7297 dm_conn_state = to_dm_connector_state(new_con_state);
/* Find the DC stream whose dm context is this connector. */
7299 for (j = 0; j < dc_state->stream_count; j++) {
7300 stream = dc_state->streams[j];
7304 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7313 pbn_div = dm_mst_get_pbn_divider(stream->link);
7314 /* pbn is calculated by compute_mst_dsc_configs_for_state*/
7315 for (j = 0; j < dc_state->stream_count; j++) {
7316 if (vars[j].aconnector == aconnector) {
7322 if (j == dc_state->stream_count)
7325 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7327 if (stream->timing.flags.DSC != 1) {
7328 dm_conn_state->pbn = pbn;
7329 dm_conn_state->vcpi_slots = slot_num;
7331 drm_dp_mst_atomic_enable_dsc(state,
7339 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7346 dm_conn_state->pbn = pbn;
7347 dm_conn_state->vcpi_slots = vcpi;
/*
 * Plane .reset hook: free any existing state, allocate a zeroed DM plane
 * state and install its base via the DRM helper.
 */
7353 static void dm_drm_plane_reset(struct drm_plane *plane)
7355 struct dm_plane_state *amdgpu_state = NULL;
7358 plane->funcs->atomic_destroy_state(plane, plane->state);
7360 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7361 WARN_ON(amdgpu_state == NULL);
7364 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
/*
 * Plane .atomic_duplicate_state: allocate a fresh DM plane state, copy the
 * base state via the DRM helper, and share the dc_state with an extra
 * refcount (released in dm_drm_plane_destroy_state()).
 */
7367 static struct drm_plane_state *
7368 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7370 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7372 old_dm_plane_state = to_dm_plane_state(plane->state);
7373 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7374 if (!dm_plane_state)
7377 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7379 if (old_dm_plane_state->dc_state) {
7380 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7381 dc_plane_state_retain(dm_plane_state->dc_state);
7384 return &dm_plane_state->base;
/*
 * Plane .atomic_destroy_state: drop the dc_state refcount taken by
 * duplicate/reset, then free the state via the DRM helper.
 */
7387 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7388 struct drm_plane_state *state)
7390 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7392 if (dm_plane_state->dc_state)
7393 dc_plane_state_release(dm_plane_state->dc_state);
7395 drm_atomic_helper_plane_destroy_state(plane, state);
/* Plane function table: atomic helpers plus DM state/format hooks. */
7398 static const struct drm_plane_funcs dm_plane_funcs = {
7399 .update_plane = drm_atomic_helper_update_plane,
7400 .disable_plane = drm_atomic_helper_disable_plane,
7401 .destroy = drm_primary_helper_destroy,
7402 .reset = dm_drm_plane_reset,
7403 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7404 .atomic_destroy_state = dm_drm_plane_destroy_state,
7405 .format_mod_supported = dm_plane_format_mod_supported,
/*
 * .prepare_fb: pin the framebuffer BO (VRAM for cursors, any supported
 * domain otherwise), bind it in GART, record its GPU address, and — for
 * newly created plane states only — fill the DC buffer attributes.
 * Unpinning happens in dm_plane_helper_cleanup_fb().
 */
7408 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7409 struct drm_plane_state *new_state)
7411 struct amdgpu_framebuffer *afb;
7412 struct drm_gem_object *obj;
7413 struct amdgpu_device *adev;
7414 struct amdgpu_bo *rbo;
7415 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7416 struct list_head list;
7417 struct ttm_validate_buffer tv;
7418 struct ww_acquire_ctx ticket;
7422 if (!new_state->fb) {
7423 DRM_DEBUG_KMS("No FB bound\n");
7427 afb = to_amdgpu_framebuffer(new_state->fb);
7428 obj = new_state->fb->obj[0];
7429 rbo = gem_to_amdgpu_bo(obj);
7430 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7431 INIT_LIST_HEAD(&list);
7435 list_add(&tv.head, &list);
/* Reserve the BO before pinning/binding; backed off on every exit path. */
7437 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7439 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7443 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7444 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7446 domain = AMDGPU_GEM_DOMAIN_VRAM;
7448 r = amdgpu_bo_pin(rbo, domain);
7449 if (unlikely(r != 0)) {
7450 if (r != -ERESTARTSYS)
7451 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7452 ttm_eu_backoff_reservation(&ticket, &list);
7456 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7457 if (unlikely(r != 0)) {
7458 amdgpu_bo_unpin(rbo);
7459 ttm_eu_backoff_reservation(&ticket, &list);
7460 DRM_ERROR("%p bind failed\n", rbo);
7464 ttm_eu_backoff_reservation(&ticket, &list);
7466 afb->address = amdgpu_bo_gpu_offset(rbo);
7471 * We don't do surface updates on planes that have been newly created,
7472 * but we also don't have the afb->address during atomic check.
7474 * Fill in buffer attributes depending on the address here, but only on
7475 * newly created planes since they're not being used by DC yet and this
7476 * won't modify global state.
7478 dm_plane_state_old = to_dm_plane_state(plane->state);
7479 dm_plane_state_new = to_dm_plane_state(new_state);
7481 if (dm_plane_state_new->dc_state &&
7482 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7483 struct dc_plane_state *plane_state =
7484 dm_plane_state_new->dc_state;
7485 bool force_disable_dcc = !plane_state->dcc.enable;
7487 fill_plane_buffer_attributes(
7488 adev, afb, plane_state->format, plane_state->rotation,
7490 &plane_state->tiling_info, &plane_state->plane_size,
7491 &plane_state->dcc, &plane_state->address,
7492 afb->tmz_surface, force_disable_dcc);
/*
 * .cleanup_fb: counterpart to prepare_fb — reserve, unpin and drop the
 * reference on the old framebuffer's BO.
 */
7498 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7499 struct drm_plane_state *old_state)
7501 struct amdgpu_bo *rbo;
7507 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7508 r = amdgpu_bo_reserve(rbo, false);
7510 DRM_ERROR("failed to reserve rbo before unpin\n");
7514 amdgpu_bo_unpin(rbo);
7515 amdgpu_bo_unreserve(rbo);
7516 amdgpu_bo_unref(&rbo);
/*
 * Validate plane placement and scaling: clamp the viewport against the
 * CRTC mode (non-cursor planes), enforce minimum viewport sizes, convert
 * DC scaling caps (1.0 == 1000) into DRM 16.16 fixed-point limits, and
 * defer the rest to drm_atomic_helper_check_plane_state().
 */
7519 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7520 struct drm_crtc_state *new_crtc_state)
7522 struct drm_framebuffer *fb = state->fb;
7523 int min_downscale, max_upscale;
7525 int max_scale = INT_MAX;
7527 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7528 if (fb && state->crtc) {
7529 /* Validate viewport to cover the case when only the position changes */
7530 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7531 int viewport_width = state->crtc_w;
7532 int viewport_height = state->crtc_h;
7534 if (state->crtc_x < 0)
7535 viewport_width += state->crtc_x;
7536 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7537 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7539 if (state->crtc_y < 0)
7540 viewport_height += state->crtc_y;
7541 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7542 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7544 if (viewport_width < 0 || viewport_height < 0) {
7545 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7547 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7548 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7550 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7551 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7557 /* Get min/max allowed scaling factors from plane caps. */
7558 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7559 &min_downscale, &max_upscale);
7561 * Convert to drm convention: 16.16 fixed point, instead of dc's
7562 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7563 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7565 min_scale = (1000 << 16) / max_upscale;
7566 max_scale = (1000 << 16) / min_downscale;
7569 return drm_atomic_helper_check_plane_state(
7570 state, new_crtc_state, min_scale, max_scale, true, true);
/*
 * Plane .atomic_check: verify placement/scaling via the state checker,
 * build the DC scaling info, then let DC validate the plane state.
 * Planes without a dc_state (or without a CRTC state) are accepted as-is.
 */
7573 static int dm_plane_atomic_check(struct drm_plane *plane,
7574 struct drm_atomic_state *state)
7576 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7578 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7579 struct dc *dc = adev->dm.dc;
7580 struct dm_plane_state *dm_plane_state;
7581 struct dc_scaling_info scaling_info;
7582 struct drm_crtc_state *new_crtc_state;
7585 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7587 dm_plane_state = to_dm_plane_state(new_plane_state);
7589 if (!dm_plane_state->dc_state)
7593 drm_atomic_get_new_crtc_state(state,
7594 new_plane_state->crtc);
7595 if (!new_crtc_state)
7598 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7602 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7606 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
/* Async (out-of-commit) updates are restricted to the cursor plane. */
7612 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7613 struct drm_atomic_state *state)
7615 /* Only support async updates on cursor planes. */
7616 if (plane->type != DRM_PLANE_TYPE_CURSOR)
/*
 * Apply an async cursor update: move the new fb and src/crtc rectangles
 * into the committed plane state, then program the cursor in hardware.
 */
7622 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7623 struct drm_atomic_state *state)
7625 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7627 struct drm_plane_state *old_state =
7628 drm_atomic_get_old_plane_state(state, plane);
7630 trace_amdgpu_dm_atomic_update_cursor(new_state);
/* swap() keeps fb refcounting balanced when new_state is later destroyed. */
7632 swap(plane->state->fb, new_state->fb);
7634 plane->state->src_x = new_state->src_x;
7635 plane->state->src_y = new_state->src_y;
7636 plane->state->src_w = new_state->src_w;
7637 plane->state->src_h = new_state->src_h;
7638 plane->state->crtc_x = new_state->crtc_x;
7639 plane->state->crtc_y = new_state->crtc_y;
7640 plane->state->crtc_w = new_state->crtc_w;
7641 plane->state->crtc_h = new_state->crtc_h;
7643 handle_cursor_update(plane, old_state);
/* Plane helper vtable. */
7646 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7647 .prepare_fb = dm_plane_helper_prepare_fb,
7648 .cleanup_fb = dm_plane_helper_cleanup_fb,
7649 .atomic_check = dm_plane_atomic_check,
7650 .atomic_async_check = dm_plane_atomic_async_check,
7651 .atomic_async_update = dm_plane_atomic_async_update
/* Pixel formats advertised on primary planes (RGB only; see TODO below). */
7655 * TODO: these are currently initialized to rgb formats only.
7656 * For future use cases we should either initialize them dynamically based on
7657 * plane capabilities, or initialize this array to all formats, so internal drm
7658 * check will succeed, and let DC implement proper check
7660 static const uint32_t rgb_formats[] = {
7661 DRM_FORMAT_XRGB8888,
7662 DRM_FORMAT_ARGB8888,
7663 DRM_FORMAT_RGBA8888,
7664 DRM_FORMAT_XRGB2101010,
7665 DRM_FORMAT_XBGR2101010,
7666 DRM_FORMAT_ARGB2101010,
7667 DRM_FORMAT_ABGR2101010,
7668 DRM_FORMAT_XRGB16161616,
7669 DRM_FORMAT_XBGR16161616,
7670 DRM_FORMAT_ARGB16161616,
7671 DRM_FORMAT_ABGR16161616,
7672 DRM_FORMAT_XBGR8888,
7673 DRM_FORMAT_ABGR8888,
/* Pixel formats advertised on overlay planes. */
7677 static const uint32_t overlay_formats[] = {
7678 DRM_FORMAT_XRGB8888,
7679 DRM_FORMAT_ARGB8888,
7680 DRM_FORMAT_RGBA8888,
7681 DRM_FORMAT_XBGR8888,
7682 DRM_FORMAT_ABGR8888,
/* Pixel formats for the cursor plane (entries elided in this excerpt). */
7686 static const u32 cursor_formats[] = {
/*
 * Fill `formats` with up to max_formats fourccs for the given plane type.
 * Primary planes get the RGB table plus NV12/P010/FP16 when the DC plane
 * caps report support; overlay and cursor planes use their fixed tables.
 * Returns the number of formats written.
 */
7690 static int get_plane_formats(const struct drm_plane *plane,
7691 const struct dc_plane_cap *plane_cap,
7692 uint32_t *formats, int max_formats)
7694 int i, num_formats = 0;
7697 * TODO: Query support for each group of formats directly from
7698 * DC plane caps. This will require adding more formats to the
7702 switch (plane->type) {
7703 case DRM_PLANE_TYPE_PRIMARY:
7704 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7705 if (num_formats >= max_formats)
7708 formats[num_formats++] = rgb_formats[i];
7711 if (plane_cap && plane_cap->pixel_format_support.nv12)
7712 formats[num_formats++] = DRM_FORMAT_NV12;
7713 if (plane_cap && plane_cap->pixel_format_support.p010)
7714 formats[num_formats++] = DRM_FORMAT_P010;
7715 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7716 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7717 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7718 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7719 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7723 case DRM_PLANE_TYPE_OVERLAY:
7724 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7725 if (num_formats >= max_formats)
7728 formats[num_formats++] = overlay_formats[i];
7732 case DRM_PLANE_TYPE_CURSOR:
7733 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7734 if (num_formats >= max_formats)
7737 formats[num_formats++] = cursor_formats[i];
/*
 * Initialize one DRM plane: gather formats and modifiers, register it with
 * DRM, then create the optional properties (per-pixel alpha/blend for
 * overlays with cap support, YCbCr color props for YUV-capable primaries,
 * rotation on Bonaire+ non-cursor planes) and install the helper funcs.
 */
7745 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7746 struct drm_plane *plane,
7747 unsigned long possible_crtcs,
7748 const struct dc_plane_cap *plane_cap)
7750 uint32_t formats[32];
7753 unsigned int supported_rotations;
7754 uint64_t *modifiers = NULL;
7756 num_formats = get_plane_formats(plane, plane_cap, formats,
7757 ARRAY_SIZE(formats));
7759 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7763 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7764 &dm_plane_funcs, formats, num_formats,
7765 modifiers, plane->type, NULL);
7770 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7771 plane_cap && plane_cap->per_pixel_alpha) {
7772 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7773 BIT(DRM_MODE_BLEND_PREMULTI);
7775 drm_plane_create_alpha_property(plane);
7776 drm_plane_create_blend_mode_property(plane, blend_caps);
7779 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7781 (plane_cap->pixel_format_support.nv12 ||
7782 plane_cap->pixel_format_support.p010)) {
7783 /* This only affects YUV formats. */
7784 drm_plane_create_color_properties(
7786 BIT(DRM_COLOR_YCBCR_BT601) |
7787 BIT(DRM_COLOR_YCBCR_BT709) |
7788 BIT(DRM_COLOR_YCBCR_BT2020),
7789 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7790 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7791 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7794 supported_rotations =
7795 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7796 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7798 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7799 plane->type != DRM_PLANE_TYPE_CURSOR)
7800 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7801 supported_rotations);
7803 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7805 /* Create (reset) the plane state */
7806 if (plane->funcs->reset)
7807 plane->funcs->reset(plane);
/*
 * Initialize one CRTC: allocate and init a dedicated cursor plane, create
 * the amdgpu_crtc with primary+cursor planes, install helper funcs, reset
 * its state, set cursor size limits from DC caps and enable color mgmt.
 * On failure the cursor plane allocation is freed at the (elided) fail
 * label.
 */
7812 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7813 struct drm_plane *plane,
7814 uint32_t crtc_index)
7816 struct amdgpu_crtc *acrtc = NULL;
7817 struct drm_plane *cursor_plane;
7821 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7825 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7826 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7828 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7832 res = drm_crtc_init_with_planes(
7837 &amdgpu_dm_crtc_funcs, NULL);
7842 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7844 /* Create (reset) the plane state */
7845 if (acrtc->base.funcs->reset)
7846 acrtc->base.funcs->reset(&acrtc->base);
7848 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7849 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7851 acrtc->crtc_id = crtc_index;
7852 acrtc->base.enabled = false;
7853 acrtc->otg_inst = -1;
7855 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7856 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7857 true, MAX_COLOR_LUT_ENTRIES);
7858 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7864 kfree(cursor_plane);
/* Map a DC signal type onto the corresponding DRM connector type. */
7869 static int to_drm_connector_type(enum signal_type st)
7872 case SIGNAL_TYPE_HDMI_TYPE_A:
7873 return DRM_MODE_CONNECTOR_HDMIA;
7874 case SIGNAL_TYPE_EDP:
7875 return DRM_MODE_CONNECTOR_eDP;
7876 case SIGNAL_TYPE_LVDS:
7877 return DRM_MODE_CONNECTOR_LVDS;
7878 case SIGNAL_TYPE_RGB:
7879 return DRM_MODE_CONNECTOR_VGA;
7880 case SIGNAL_TYPE_DISPLAY_PORT:
7881 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7882 return DRM_MODE_CONNECTOR_DisplayPort;
7883 case SIGNAL_TYPE_DVI_DUAL_LINK:
7884 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7885 return DRM_MODE_CONNECTOR_DVID;
7886 case SIGNAL_TYPE_VIRTUAL:
7887 return DRM_MODE_CONNECTOR_VIRTUAL;
7890 return DRM_MODE_CONNECTOR_Unknown;
/* Return the connector's single possible encoder (first match). */
7894 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7896 struct drm_encoder *encoder;
7898 /* There is only one encoder per connector */
7899 drm_connector_for_each_possible_encoder(connector, encoder)
/*
 * Record the panel's native mode on the encoder: scan probed_modes for a
 * DRM_MODE_TYPE_PREFERRED entry (clock reset to 0 first so "no preferred
 * mode found" is detectable).
 */
7905 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7907 struct drm_encoder *encoder;
7908 struct amdgpu_encoder *amdgpu_encoder;
7910 encoder = amdgpu_dm_connector_to_encoder(connector);
7912 if (encoder == NULL)
7915 amdgpu_encoder = to_amdgpu_encoder(encoder);
7917 amdgpu_encoder->native_mode.clock = 0;
7919 if (!list_empty(&connector->probed_modes)) {
7920 struct drm_display_mode *preferred_mode = NULL;
7922 list_for_each_entry(preferred_mode,
7923 &connector->probed_modes,
7925 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7926 amdgpu_encoder->native_mode = *preferred_mode;
/*
 * Duplicate the encoder's native mode with the given name and hdisplay/
 * vdisplay overrides, clearing the PREFERRED flag on the copy.
 */
7934 static struct drm_display_mode *
7935 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7937 int hdisplay, int vdisplay)
7939 struct drm_device *dev = encoder->dev;
7940 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7941 struct drm_display_mode *mode = NULL;
7942 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7944 mode = drm_mode_duplicate(dev, native_mode);
7949 mode->hdisplay = hdisplay;
7950 mode->vdisplay = vdisplay;
7951 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7952 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
/*
 * Add a set of common low resolutions (scaled variants of the native
 * mode) to the connector's probed mode list, skipping any resolution
 * that exceeds or equals the native mode or that is already present.
 */
7958 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7959 struct drm_connector *connector)
7961 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7962 struct drm_display_mode *mode = NULL;
7963 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7964 struct amdgpu_dm_connector *amdgpu_dm_connector =
7965 to_amdgpu_dm_connector(connector);
7969 char name[DRM_DISPLAY_MODE_LEN];
7972 } common_modes[] = {
7973 { "640x480", 640, 480},
7974 { "800x600", 800, 600},
7975 { "1024x768", 1024, 768},
7976 { "1280x720", 1280, 720},
7977 { "1280x800", 1280, 800},
7978 {"1280x1024", 1280, 1024},
7979 { "1440x900", 1440, 900},
7980 {"1680x1050", 1680, 1050},
7981 {"1600x1200", 1600, 1200},
7982 {"1920x1080", 1920, 1080},
7983 {"1920x1200", 1920, 1200}
7986 n = ARRAY_SIZE(common_modes);
7988 for (i = 0; i < n; i++) {
7989 struct drm_display_mode *curmode = NULL;
7990 bool mode_existed = false;
7992 if (common_modes[i].w > native_mode->hdisplay ||
7993 common_modes[i].h > native_mode->vdisplay ||
7994 (common_modes[i].w == native_mode->hdisplay &&
7995 common_modes[i].h == native_mode->vdisplay))
7998 list_for_each_entry(curmode, &connector->probed_modes, head) {
7999 if (common_modes[i].w == curmode->hdisplay &&
8000 common_modes[i].h == curmode->vdisplay) {
8001 mode_existed = true;
8009 mode = amdgpu_dm_create_common_mode(encoder,
8010 common_modes[i].name, common_modes[i].w,
8012 drm_mode_probed_add(connector, mode);
8013 amdgpu_dm_connector->num_modes++;
/*
 * For internal panels (eDP/LVDS) with a known native mode, apply any
 * DMI panel-orientation quirk to the connector.
 */
8017 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8019 struct drm_encoder *encoder;
8020 struct amdgpu_encoder *amdgpu_encoder;
8021 const struct drm_display_mode *native_mode;
8023 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8024 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8027 encoder = amdgpu_dm_connector_to_encoder(connector);
8031 amdgpu_encoder = to_amdgpu_encoder(encoder);
8033 native_mode = &amdgpu_encoder->native_mode;
8034 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8037 drm_connector_set_panel_orientation_with_quirk(connector,
8038 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8039 native_mode->hdisplay,
8040 native_mode->vdisplay);
/*
 * Build the probed-mode list from an EDID: add the EDID modes, sort them,
 * determine the native mode, refresh FreeSync capabilities and apply the
 * panel-orientation quirk.  With no EDID (presumably the elided else
 * branch) the mode count is reset to zero.
 */
8043 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8046 struct amdgpu_dm_connector *amdgpu_dm_connector =
8047 to_amdgpu_dm_connector(connector);
8050 /* empty probed_modes */
8051 INIT_LIST_HEAD(&connector->probed_modes);
8052 amdgpu_dm_connector->num_modes =
8053 drm_add_edid_modes(connector, edid);
8055 /* sorting the probed modes before calling function
8056 * amdgpu_dm_get_native_mode() since EDID can have
8057 * more than one preferred mode. The modes that are
8058 * later in the probed mode list could be of higher
8059 * and preferred resolution. For example, 3840x2160
8060 * resolution in base EDID preferred timing and 4096x2160
8061 * preferred resolution in DID extension block later.
8063 drm_mode_sort(&connector->probed_modes);
8064 amdgpu_dm_get_native_mode(connector);
8066 /* Freesync capabilities are reset by calling
8067 * drm_add_edid_modes() and need to be
8070 amdgpu_dm_update_freesync_caps(connector, edid);
8072 amdgpu_set_panel_orientation(connector);
8074 amdgpu_dm_connector->num_modes = 0;
/*
 * Return true when a mode equal to @mode (per drm_mode_equal()) is
 * already present on the connector's probed-mode list.
 */
8078 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8079 struct drm_display_mode *mode)
8081 struct drm_display_mode *m;
8083 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8084 if (drm_mode_equal(m, mode))
/*
 * Synthesize additional "FreeSync video" modes for common content rates
 * by stretching the vertical blanking interval of the highest-refresh
 * mode that matches the preferred resolution.  Each synthesized mode is
 * added to the probed list unless an equal mode already exists.
 * Returns the number of modes added.
 */
8091 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8093 const struct drm_display_mode *m;
8094 struct drm_display_mode *new_mode;
8096 uint32_t new_modes_count = 0;
8098 /* Standard FPS values
8107 * 60 - Commonly used
8108 * 48,72,96,120 - Multiples of 24
8110 static const uint32_t common_rates[] = {
8111 23976, 24000, 25000, 29970, 30000,
8112 48000, 50000, 60000, 72000, 96000, 120000
8116 * Find mode with highest refresh rate with the same resolution
8117 * as the preferred mode. Some monitors report a preferred mode
8118 * with lower resolution than the highest refresh rate supported.
8121 m = get_highest_refresh_rate_mode(aconnector, true);
8125 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8126 uint64_t target_vtotal, target_vtotal_diff;
/* Skip rates above the base mode's refresh rate, or outside the
 * panel's reported VRR window (vfreq in Hz, rates in mHz). */
8129 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8132 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8133 common_rates[i] > aconnector->max_vfreq * 1000)
/* vtotal such that clock / (htotal * vtotal) hits the target rate. */
8136 num = (unsigned long long)m->clock * 1000 * 1000;
8137 den = common_rates[i] * (unsigned long long)m->htotal;
8138 target_vtotal = div_u64(num, den);
8139 target_vtotal_diff = target_vtotal - m->vtotal;
8141 /* Check for illegal modes */
8142 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8143 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8144 m->vtotal + target_vtotal_diff < m->vsync_end)
8147 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
/* Grow the vertical blank by the computed delta; mark as driver mode. */
8151 new_mode->vtotal += (u16)target_vtotal_diff;
8152 new_mode->vsync_start += (u16)target_vtotal_diff;
8153 new_mode->vsync_end += (u16)target_vtotal_diff;
8154 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8155 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8157 if (!is_duplicate_mode(aconnector, new_mode)) {
8158 drm_mode_probed_add(&aconnector->base, new_mode);
8159 new_modes_count += 1;
/* Duplicate: free the unused copy (presumably the elided else branch). */
8161 drm_mode_destroy(aconnector->base.dev, new_mode);
8164 return new_modes_count;
/*
 * Add FreeSync video modes when the amdgpu_freesync_vid_mode option is
 * enabled, an EDID is present, and the panel reports a usable VRR range
 * (more than 10 Hz wide).
 */
8167 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8170 struct amdgpu_dm_connector *amdgpu_dm_connector =
8171 to_amdgpu_dm_connector(connector);
8173 if (!(amdgpu_freesync_vid_mode && edid))
8176 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8177 amdgpu_dm_connector->num_modes +=
8178 add_fs_modes(amdgpu_dm_connector);
/*
 * drm_connector_helper .get_modes hook.  With an invalid (or missing)
 * cached EDID, fall back to the standard 640x480 no-EDID mode; otherwise
 * populate the list from the EDID plus common and FreeSync modes.
 * Returns the number of probed modes.
 */
8181 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8183 struct amdgpu_dm_connector *amdgpu_dm_connector =
8184 to_amdgpu_dm_connector(connector);
8185 struct drm_encoder *encoder;
8186 struct edid *edid = amdgpu_dm_connector->edid;
8188 encoder = amdgpu_dm_connector_to_encoder(connector);
8190 if (!drm_edid_is_valid(edid)) {
8191 amdgpu_dm_connector->num_modes =
8192 drm_add_modes_noedid(connector, 640, 480);
8194 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8195 amdgpu_dm_connector_add_common_modes(encoder, connector);
8196 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8198 amdgpu_dm_fbc_init(connector);
8200 return amdgpu_dm_connector->num_modes;
/*
 * Common initialization for a DM connector: allocate default connector
 * state, set DM defaults, configure HPD polling and YCbCr 4:2:0 support
 * per connector type, and attach the DRM properties (scaling, underscan,
 * max bpc, ABM level, HDR metadata, VRR capable, HDCP content protection).
 */
8203 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8204 struct amdgpu_dm_connector *aconnector,
8206 struct dc_link *link,
8209 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8212 * Some of the properties below require access to state, like bpc.
8213 * Allocate some default initial connector state with our reset helper.
8215 if (aconnector->base.funcs->reset)
8216 aconnector->base.funcs->reset(&aconnector->base);
8218 aconnector->connector_id = link_index;
8219 aconnector->dc_link = link;
8220 aconnector->base.interlace_allowed = false;
8221 aconnector->base.doublescan_allowed = false;
8222 aconnector->base.stereo_allowed = false;
8223 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8224 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8225 aconnector->audio_inst = -1;
8226 mutex_init(&aconnector->hpd_lock);
8229 * configure support HPD hot plug connector_>polled default value is 0
8230 * which means HPD hot plug not supported
8232 switch (connector_type) {
8233 case DRM_MODE_CONNECTOR_HDMIA:
8234 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8235 aconnector->base.ycbcr_420_allowed =
8236 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8238 case DRM_MODE_CONNECTOR_DisplayPort:
8239 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
/* With flexible DIG mapping the link encoder is assigned dynamically:
 * use the one currently tied to this link, or the next available one. */
8240 if (link->is_dig_mapping_flexible &&
8241 link->dc->res_pool->funcs->link_encs_assign) {
8243 link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
8244 if (!link->link_enc)
8246 link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
8250 aconnector->base.ycbcr_420_allowed =
8251 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8253 case DRM_MODE_CONNECTOR_DVID:
8254 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8260 drm_object_attach_property(&aconnector->base.base,
8261 dm->ddev->mode_config.scaling_mode_property,
8262 DRM_MODE_SCALE_NONE);
8264 drm_object_attach_property(&aconnector->base.base,
8265 adev->mode_info.underscan_property,
8267 drm_object_attach_property(&aconnector->base.base,
8268 adev->mode_info.underscan_hborder_property,
8270 drm_object_attach_property(&aconnector->base.base,
8271 adev->mode_info.underscan_vborder_property,
/* MST ports inherit max-bpc from their root connector. */
8274 if (!aconnector->mst_port)
8275 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8277 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8278 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8279 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
/* ABM requires either an initialized DMCU or a DMUB service. */
8281 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8282 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8283 drm_object_attach_property(&aconnector->base.base,
8284 adev->mode_info.abm_level_property, 0);
8287 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8288 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8289 connector_type == DRM_MODE_CONNECTOR_eDP) {
8290 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8292 if (!aconnector->mst_port)
8293 drm_connector_attach_vrr_capable_property(&aconnector->base);
8295 #ifdef CONFIG_DRM_AMD_DC_HDCP
8296 if (adev->dm.hdcp_workqueue)
8297 drm_connector_attach_content_protection_property(&aconnector->base, true);
/*
 * i2c_algorithm .master_xfer hook: translate the i2c_msg array into a DC
 * i2c_command payload list and submit it over the link's DDC channel.
 * The payload array is heap-allocated and freed before returning.
 */
8302 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8303 struct i2c_msg *msgs, int num)
8305 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8306 struct ddc_service *ddc_service = i2c->ddc_service;
8307 struct i2c_command cmd;
8311 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8316 cmd.number_of_payloads = num;
8317 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
/* I2C_M_RD marks a read message; everything else is a write. */
8320 for (i = 0; i < num; i++) {
8321 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8322 cmd.payloads[i].address = msgs[i].addr;
8323 cmd.payloads[i].length = msgs[i].len;
8324 cmd.payloads[i].data = msgs[i].buf;
8328 ddc_service->ctx->dc,
8329 ddc_service->ddc_pin->hw_info.ddc_channel,
8333 kfree(cmd.payloads);
/* Advertise plain I2C transfers plus emulated SMBus support. */
8337 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8339 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
/* i2c algorithm backed by DC's DDC service (see amdgpu_dm_i2c_xfer). */
8342 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8343 .master_xfer = amdgpu_dm_i2c_xfer,
8344 .functionality = amdgpu_dm_i2c_func,
/*
 * Allocate and initialize an amdgpu i2c adapter wrapping @ddc_service.
 * When the service has a DDC pin, @link_index is recorded as its hardware
 * DDC channel.  Ownership of the returned adapter passes to the caller
 * (amdgpu_dm_connector_init stores it in aconnector->i2c).
 */
8347 static struct amdgpu_i2c_adapter *
8348 create_i2c(struct ddc_service *ddc_service,
8352 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8353 struct amdgpu_i2c_adapter *i2c;
8355 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8358 i2c->base.owner = THIS_MODULE;
8359 i2c->base.class = I2C_CLASS_DDC;
8360 i2c->base.dev.parent = &adev->pdev->dev;
8361 i2c->base.algo = &amdgpu_dm_i2c_algo;
8362 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8363 i2c_set_adapdata(&i2c->base, i2c);
8364 i2c->ddc_service = ddc_service;
8365 if (i2c->ddc_service->ddc_pin)
8366 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
* Note: this function assumes that dc_link_detect() was called for the
* dc_link which will be represented by this aconnector.
*
* Creates the i2c adapter for the link's DDC, registers it, initializes
* the DRM connector (with helper funcs and DM properties), attaches the
* encoder, and for DP/eDP sets up MST support.  Returns 0 on success;
* error paths clear aconnector->i2c (cleanup lines elided in this view).
8376 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8377 struct amdgpu_dm_connector *aconnector,
8378 uint32_t link_index,
8379 struct amdgpu_encoder *aencoder)
8383 struct dc *dc = dm->dc;
8384 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8385 struct amdgpu_i2c_adapter *i2c;
/* Back-pointer so dc_link events can reach the DRM connector. */
8387 link->priv = aconnector;
8389 DRM_DEBUG_DRIVER("%s()\n", __func__);
8391 i2c = create_i2c(link->ddc, link->link_index, &res);
8393 DRM_ERROR("Failed to create i2c adapter data\n");
8397 aconnector->i2c = i2c;
8398 res = i2c_add_adapter(&i2c->base);
8401 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8405 connector_type = to_drm_connector_type(link->connector_signal);
8407 res = drm_connector_init_with_ddc(
8410 &amdgpu_dm_connector_funcs,
8415 DRM_ERROR("connector_init failed\n");
8416 aconnector->connector_id = -1;
8420 drm_connector_helper_add(
8422 &amdgpu_dm_connector_helper_funcs);
8424 amdgpu_dm_connector_init_helper(
8431 drm_connector_attach_encoder(
8432 &aconnector->base, &aencoder->base);
8434 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8435 || connector_type == DRM_MODE_CONNECTOR_eDP)
8436 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8441 aconnector->i2c = NULL;
/*
 * Return the possible_crtcs bitmask for encoders, derived from the number
 * of CRTCs on this ASIC.
 * NOTE(review): the switch arms are elided in this view of the source.
 */
8446 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8448 switch (adev->mode_info.num_crtc) {
/*
 * Initialize a DRM encoder (TMDS type) for @link_index, set its
 * possible-CRTCs mask and add the DM encoder helper funcs.  On failure
 * the encoder id is set to -1.
 */
8465 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8466 struct amdgpu_encoder *aencoder,
8467 uint32_t link_index)
8469 struct amdgpu_device *adev = drm_to_adev(dev);
8471 int res = drm_encoder_init(dev,
8473 &amdgpu_dm_encoder_funcs,
8474 DRM_MODE_ENCODER_TMDS,
8477 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8480 aencoder->encoder_id = link_index;
8482 aencoder->encoder_id = -1;
8484 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
/*
 * Enable or disable the per-CRTC interrupt sources (pageflip irq, vblank
 * via drm_crtc_vblank_on/off, and - with SECURE_DISPLAY - vline0) when a
 * CRTC is brought up or torn down.
 * NOTE(review): the enable/disable branch structure is elided here.
 */
8489 static void manage_dm_interrupts(struct amdgpu_device *adev,
8490 struct amdgpu_crtc *acrtc,
8494 * We have no guarantee that the frontend index maps to the same
8495 * backend index - some even map to more than one.
8497 * TODO: Use a different interrupt or check DC itself for the mapping.
8500 amdgpu_display_crtc_idx_to_irq_type(
8505 drm_crtc_vblank_on(&acrtc->base);
8508 &adev->pageflip_irq,
8510 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8517 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8525 &adev->pageflip_irq,
8527 drm_crtc_vblank_off(&acrtc->base);
/*
 * Re-apply the current pageflip IRQ state for @acrtc to hardware; used
 * when hardware state may be stale relative to the tracked IRQ state.
 */
8531 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8532 struct amdgpu_crtc *acrtc)
8535 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8538 * This reads the current state for the IRQ and force reapplies
8539 * the setting to hardware.
8541 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
/*
 * Return true when scaling or underscan settings changed between the old
 * and new DM connector state.  Underscan toggles only count as a change
 * when non-zero borders are involved.
 */
8545 is_scaling_state_different(const struct dm_connector_state *dm_state,
8546 const struct dm_connector_state *old_dm_state)
8548 if (dm_state->scaling != old_dm_state->scaling)
/* Underscan turned off: only matters if borders were actually applied. */
8550 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8551 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
/* Underscan turned on: only matters if new borders are non-zero. */
8553 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8554 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8556 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8557 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8562 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP state machine needs to run for this connector
 * based on the old -> new content-protection transition.  Note this also
 * normalizes some transitions by writing back into @state (e.g. type
 * changes force DESIRED, ENABLED->DESIRED is ignored as a re-enable).
 */
8563 static bool is_content_protection_different(struct drm_connector_state *state,
8564 const struct drm_connector_state *old_state,
8565 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8567 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8568 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8570 /* Handle: Type0/1 change */
8571 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8572 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8573 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8577 /* CP is being re enabled, ignore this
8579 * Handles: ENABLED -> DESIRED
8581 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8582 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8583 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8587 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8589 * Handles: UNDESIRED -> ENABLED
8591 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8592 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8593 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8595 /* Stream removed and re-enabled
8597 * Can sometimes overlap with the HPD case,
8598 * thus set update_hdcp to false to avoid
8599 * setting HDCP multiple times.
8601 * Handles: DESIRED -> DESIRED (Special case)
8603 if (!(old_state->crtc && old_state->crtc->enabled) &&
8604 state->crtc && state->crtc->enabled &&
8605 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8606 dm_con_state->update_hdcp = false;
8610 /* Hot-plug, headless s3, dpms
8612 * Only start HDCP if the display is connected/enabled.
8613 * update_hdcp flag will be set to false until the next
8616 * Handles: DESIRED -> DESIRED (Special case)
8618 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8619 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8620 dm_con_state->update_hdcp = false;
8625 * Handles: UNDESIRED -> UNDESIRED
8626 * DESIRED -> DESIRED
8627 * ENABLED -> ENABLED
8629 if (old_state->content_protection == state->content_protection)
8633 * Handles: UNDESIRED -> DESIRED
8634 * DESIRED -> UNDESIRED
8635 * ENABLED -> UNDESIRED
8637 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8641 * Handles: DESIRED -> ENABLED
/*
 * Tear down the DM-side bookkeeping for a stream being removed from a
 * CRTC (update/mode-change path): clear the OTG instance and mark the
 * CRTC disabled.
 */
8647 static void remove_stream(struct amdgpu_device *adev,
8648 struct amdgpu_crtc *acrtc,
8649 struct dc_stream_state *stream)
8651 /* this is the update mode case */
8653 acrtc->otg_inst = -1;
8654 acrtc->enabled = false;
/*
 * Translate the cursor plane state into a DC cursor position.  Cursor
 * coordinates partially off the top/left edge are clamped to 0 with the
 * overhang folded into the hotspot.  Rejects cursors larger than the
 * hardware maximum.  Returns 0 on success.
 */
8657 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8658 struct dc_cursor_position *position)
8660 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8662 int xorigin = 0, yorigin = 0;
/* No CRTC or no FB: leave position->enable false (cursor off). */
8664 if (!crtc || !plane->state->fb)
8667 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8668 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8669 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8671 plane->state->crtc_w,
8672 plane->state->crtc_h);
8676 x = plane->state->crtc_x;
8677 y = plane->state->crtc_y;
/* Entirely off-screen to the top/left: treat as disabled. */
8679 if (x <= -amdgpu_crtc->max_cursor_width ||
8680 y <= -amdgpu_crtc->max_cursor_height)
/* Partially off-edge: clamp to 0 and shift the hotspot instead. */
8684 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8688 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8691 position->enable = true;
8692 position->translate_by_source = true;
8695 position->x_hotspot = xorigin;
8696 position->y_hotspot = yorigin;
/*
 * Program the hardware cursor for a cursor-plane update: compute the DC
 * position, and either disable the cursor (position not enabled) or set
 * its attributes (address, size, premultiplied-alpha format) and
 * position on the stream.  All DC calls are made under dm.dc_lock.
 */
8701 static void handle_cursor_update(struct drm_plane *plane,
8702 struct drm_plane_state *old_plane_state)
8704 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8705 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8706 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8707 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8708 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8709 uint64_t address = afb ? afb->address : 0;
8710 struct dc_cursor_position position = {0};
8711 struct dc_cursor_attributes attributes;
/* Nothing to do if there was and is no cursor FB. */
8714 if (!plane->state->fb && !old_plane_state->fb)
8717 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8719 amdgpu_crtc->crtc_id,
8720 plane->state->crtc_w,
8721 plane->state->crtc_h);
8723 ret = get_cursor_position(plane, crtc, &position);
8727 if (!position.enable) {
8728 /* turn off cursor */
8729 if (crtc_state && crtc_state->stream) {
8730 mutex_lock(&adev->dm.dc_lock);
8731 dc_stream_set_cursor_position(crtc_state->stream,
8733 mutex_unlock(&adev->dm.dc_lock);
8738 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8739 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8741 memset(&attributes, 0, sizeof(attributes));
8742 attributes.address.high_part = upper_32_bits(address);
8743 attributes.address.low_part = lower_32_bits(address);
8744 attributes.width = plane->state->crtc_w;
8745 attributes.height = plane->state->crtc_h;
8746 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8747 attributes.rotation_angle = 0;
8748 attributes.attribute_flags.value = 0;
/* Pitch in pixels, derived from the FB's byte pitch and bpp. */
8750 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8752 if (crtc_state->stream) {
8753 mutex_lock(&adev->dm.dc_lock);
8754 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8756 DRM_ERROR("DC failed to set cursor attributes\n");
8758 if (!dc_stream_set_cursor_position(crtc_state->stream,
8760 DRM_ERROR("DC failed to set cursor position\n");
8761 mutex_unlock(&adev->dm.dc_lock);
/*
 * Hand the pending pageflip event over to the pageflip interrupt handler.
 * Must be called with the device's event_lock held (asserted below); the
 * CRTC state's event is consumed and the flip marked SUBMITTED.
 */
8765 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8768 assert_spin_locked(&acrtc->base.dev->event_lock);
8769 WARN_ON(acrtc->event);
8771 acrtc->event = acrtc->base.state->event;
8773 /* Set the flip status */
8774 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8776 /* Mark this event as consumed */
8777 acrtc->base.state->event = NULL;
8779 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
/*
 * Per-flip FreeSync/VRR update for a stream: run the freesync module's
 * preflip handling, rebuild the VRR infopacket, record what changed in
 * the CRTC state, and copy the results into the stream and the IRQ
 * parameters.  All VRR state is accessed under the device event_lock.
 */
8783 static void update_freesync_state_on_stream(
8784 struct amdgpu_display_manager *dm,
8785 struct dm_crtc_state *new_crtc_state,
8786 struct dc_stream_state *new_stream,
8787 struct dc_plane_state *surface,
8788 u32 flip_timestamp_in_us)
8790 struct mod_vrr_params vrr_params;
8791 struct dc_info_packet vrr_infopacket = {0};
8792 struct amdgpu_device *adev = dm->adev;
8793 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8794 unsigned long flags;
8795 bool pack_sdp_v1_3 = false;
8801 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8802 * For now it's sufficient to just guard against these conditions.
8805 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8808 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8809 vrr_params = acrtc->dm_irq_params.vrr_params;
8812 mod_freesync_handle_preflip(
8813 dm->freesync_module,
8816 flip_timestamp_in_us,
/* Pre-DCE/AI parts apply BTR v_update handling here and must adjust
 * vmin/vmax before the frame ends. */
8819 if (adev->family < AMDGPU_FAMILY_AI &&
8820 amdgpu_dm_vrr_active(new_crtc_state)) {
8821 mod_freesync_handle_v_update(dm->freesync_module,
8822 new_stream, &vrr_params);
8824 /* Need to call this before the frame ends. */
8825 dc_stream_adjust_vmin_vmax(dm->dc,
8826 new_crtc_state->stream,
8827 &vrr_params.adjust);
8831 mod_freesync_build_vrr_infopacket(
8832 dm->freesync_module,
8836 TRANSFER_FUNC_UNKNOWN,
/* Track whether timing or infopacket actually changed this flip. */
8840 new_crtc_state->freesync_timing_changed |=
8841 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8843 sizeof(vrr_params.adjust)) != 0);
8845 new_crtc_state->freesync_vrr_info_changed |=
8846 (memcmp(&new_crtc_state->vrr_infopacket,
8848 sizeof(vrr_infopacket)) != 0);
8850 acrtc->dm_irq_params.vrr_params = vrr_params;
8851 new_crtc_state->vrr_infopacket = vrr_infopacket;
8853 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8854 new_stream->vrr_infopacket = vrr_infopacket;
8856 if (new_crtc_state->freesync_vrr_info_changed)
8857 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8858 new_crtc_state->base.crtc->base.id,
8859 (int)new_crtc_state->base.vrr_enabled,
8860 (int)vrr_params.state);
8862 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * Recompute the VRR parameters for a stream from its freesync config and
 * publish them (together with the config and active plane count) to the
 * per-CRTC IRQ parameter block, under the device event_lock, so the IRQ
 * handlers see a consistent snapshot.
 */
8865 static void update_stream_irq_parameters(
8866 struct amdgpu_display_manager *dm,
8867 struct dm_crtc_state *new_crtc_state)
8869 struct dc_stream_state *new_stream = new_crtc_state->stream;
8870 struct mod_vrr_params vrr_params;
8871 struct mod_freesync_config config = new_crtc_state->freesync_config;
8872 struct amdgpu_device *adev = dm->adev;
8873 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8874 unsigned long flags;
8880 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8881 * For now it's sufficient to just guard against these conditions.
8883 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8886 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8887 vrr_params = acrtc->dm_irq_params.vrr_params;
8889 if (new_crtc_state->vrr_supported &&
8890 config.min_refresh_in_uhz &&
8891 config.max_refresh_in_uhz) {
8893 * if freesync compatible mode was set, config.state will be set
/* Freesync-compatible fixed-rate mode: keep the fixed refresh rather
 * than re-deriving the state from vrr_enabled. */
8896 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8897 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8898 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8899 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8900 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8901 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8902 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8904 config.state = new_crtc_state->base.vrr_enabled ?
8905 VRR_STATE_ACTIVE_VARIABLE :
8909 config.state = VRR_STATE_UNSUPPORTED;
8912 mod_freesync_build_vrr_params(dm->freesync_module,
8914 &config, &vrr_params);
8916 new_crtc_state->freesync_timing_changed |=
8917 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8918 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8920 new_crtc_state->freesync_config = config;
8921 /* Copy state for access from DM IRQ handler */
8922 acrtc->dm_irq_params.freesync_config = config;
8923 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8924 acrtc->dm_irq_params.vrr_params = vrr_params;
8925 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * Handle a VRR on/off transition for a CRTC: while VRR is active we hold
 * a vblank reference (so vblank irq is never disabled mid-VRR, which
 * would corrupt timestamps computed in the front porch) and enable the
 * vupdate irq; both are released when VRR turns off.
 */
8928 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8929 struct dm_crtc_state *new_state)
8931 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8932 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8934 if (!old_vrr_active && new_vrr_active) {
8935 /* Transition VRR inactive -> active:
8936 * While VRR is active, we must not disable vblank irq, as a
8937 * reenable after disable would compute bogus vblank/pflip
8938 * timestamps if it likely happened inside display front-porch.
8940 * We also need vupdate irq for the actual core vblank handling
8943 dm_set_vupdate_irq(new_state->base.crtc, true);
8944 drm_crtc_vblank_get(new_state->base.crtc);
8945 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8946 __func__, new_state->base.crtc->base.id);
8947 } else if (old_vrr_active && !new_vrr_active) {
8948 /* Transition VRR active -> inactive:
8949 * Allow vblank irq disable again for fixed refresh rate.
8951 dm_set_vupdate_irq(new_state->base.crtc, false);
8952 drm_crtc_vblank_put(new_state->base.crtc);
8953 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8954 __func__, new_state->base.crtc->base.id);
/*
 * Commit all cursor-plane updates in an atomic state by running the
 * cursor update path for every cursor plane present.
 */
8958 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8960 struct drm_plane *plane;
8961 struct drm_plane_state *old_plane_state;
8965 * TODO: Make this per-stream so we don't issue redundant updates for
8966 * commits with multiple streams.
8968 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8969 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8970 handle_cursor_update(plane, old_plane_state);
8973 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8974 struct dc_state *dc_state,
8975 struct drm_device *dev,
8976 struct amdgpu_display_manager *dm,
8977 struct drm_crtc *pcrtc,
8978 bool wait_for_vblank)
8981 uint64_t timestamp_ns;
8982 struct drm_plane *plane;
8983 struct drm_plane_state *old_plane_state, *new_plane_state;
8984 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8985 struct drm_crtc_state *new_pcrtc_state =
8986 drm_atomic_get_new_crtc_state(state, pcrtc);
8987 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8988 struct dm_crtc_state *dm_old_crtc_state =
8989 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8990 int planes_count = 0, vpos, hpos;
8992 unsigned long flags;
8993 struct amdgpu_bo *abo;
8994 uint32_t target_vblank, last_flip_vblank;
8995 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8996 bool pflip_present = false;
8998 struct dc_surface_update surface_updates[MAX_SURFACES];
8999 struct dc_plane_info plane_infos[MAX_SURFACES];
9000 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9001 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9002 struct dc_stream_update stream_update;
9005 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9008 dm_error("Failed to allocate update bundle\n");
9013 * Disable the cursor first if we're disabling all the planes.
9014 * It'll remain on the screen after the planes are re-enabled
9017 if (acrtc_state->active_planes == 0)
9018 amdgpu_dm_commit_cursors(state);
9020 /* update planes when needed */
9021 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9022 struct drm_crtc *crtc = new_plane_state->crtc;
9023 struct drm_crtc_state *new_crtc_state;
9024 struct drm_framebuffer *fb = new_plane_state->fb;
9025 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9026 bool plane_needs_flip;
9027 struct dc_plane_state *dc_plane;
9028 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9030 /* Cursor plane is handled after stream updates */
9031 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9034 if (!fb || !crtc || pcrtc != crtc)
9037 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9038 if (!new_crtc_state->active)
9041 dc_plane = dm_new_plane_state->dc_state;
9043 bundle->surface_updates[planes_count].surface = dc_plane;
9044 if (new_pcrtc_state->color_mgmt_changed) {
9045 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9046 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9047 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9050 fill_dc_scaling_info(dm->adev, new_plane_state,
9051 &bundle->scaling_infos[planes_count]);
9053 bundle->surface_updates[planes_count].scaling_info =
9054 &bundle->scaling_infos[planes_count];
9056 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9058 pflip_present = pflip_present || plane_needs_flip;
9060 if (!plane_needs_flip) {
9065 abo = gem_to_amdgpu_bo(fb->obj[0]);
9068 * Wait for all fences on this FB. Do limited wait to avoid
9069 * deadlock during GPU reset when this fence will not signal
9070 * but we hold reservation lock for the BO.
9072 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9073 msecs_to_jiffies(5000));
9074 if (unlikely(r <= 0))
9075 DRM_ERROR("Waiting for fences timed out!");
9077 fill_dc_plane_info_and_addr(
9078 dm->adev, new_plane_state,
9080 &bundle->plane_infos[planes_count],
9081 &bundle->flip_addrs[planes_count].address,
9082 afb->tmz_surface, false);
9084 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9085 new_plane_state->plane->index,
9086 bundle->plane_infos[planes_count].dcc.enable);
9088 bundle->surface_updates[planes_count].plane_info =
9089 &bundle->plane_infos[planes_count];
9092 * Only allow immediate flips for fast updates that don't
9093 * change FB pitch, DCC state, rotation or mirroing.
9095 bundle->flip_addrs[planes_count].flip_immediate =
9096 crtc->state->async_flip &&
9097 acrtc_state->update_type == UPDATE_TYPE_FAST;
9099 timestamp_ns = ktime_get_ns();
9100 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9101 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9102 bundle->surface_updates[planes_count].surface = dc_plane;
9104 if (!bundle->surface_updates[planes_count].surface) {
9105 DRM_ERROR("No surface for CRTC: id=%d\n",
9106 acrtc_attach->crtc_id);
9110 if (plane == pcrtc->primary)
9111 update_freesync_state_on_stream(
9114 acrtc_state->stream,
9116 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9118 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9120 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9121 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9127 if (pflip_present) {
9129 /* Use old throttling in non-vrr fixed refresh rate mode
9130 * to keep flip scheduling based on target vblank counts
9131 * working in a backwards compatible way, e.g., for
9132 * clients using the GLX_OML_sync_control extension or
9133 * DRI3/Present extension with defined target_msc.
9135 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9138 /* For variable refresh rate mode only:
9139 * Get vblank of last completed flip to avoid > 1 vrr
9140 * flips per video frame by use of throttling, but allow
9141 * flip programming anywhere in the possibly large
9142 * variable vrr vblank interval for fine-grained flip
9143 * timing control and more opportunity to avoid stutter
9144 * on late submission of flips.
9146 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9147 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9148 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9151 target_vblank = last_flip_vblank + wait_for_vblank;
9154 * Wait until we're out of the vertical blank period before the one
9155 * targeted by the flip
9157 while ((acrtc_attach->enabled &&
9158 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9159 0, &vpos, &hpos, NULL,
9160 NULL, &pcrtc->hwmode)
9161 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9162 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9163 (int)(target_vblank -
9164 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9165 usleep_range(1000, 1100);
9169 * Prepare the flip event for the pageflip interrupt to handle.
9171 * This only works in the case where we've already turned on the
9172 * appropriate hardware blocks (eg. HUBP) so in the transition case
9173 * from 0 -> n planes we have to skip a hardware generated event
9174 * and rely on sending it from software.
9176 if (acrtc_attach->base.state->event &&
9177 acrtc_state->active_planes > 0 &&
9178 !acrtc_state->force_dpms_off) {
9179 drm_crtc_vblank_get(pcrtc);
9181 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9183 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9184 prepare_flip_isr(acrtc_attach);
9186 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9189 if (acrtc_state->stream) {
9190 if (acrtc_state->freesync_vrr_info_changed)
9191 bundle->stream_update.vrr_infopacket =
9192 &acrtc_state->stream->vrr_infopacket;
9196 /* Update the planes if changed or disable if we don't have any. */
9197 if ((planes_count || acrtc_state->active_planes == 0) &&
9198 acrtc_state->stream) {
9199 #if defined(CONFIG_DRM_AMD_DC_DCN)
9201 * If PSR or idle optimizations are enabled then flush out
9202 * any pending work before hardware programming.
9204 if (dm->vblank_control_workqueue)
9205 flush_workqueue(dm->vblank_control_workqueue);
9208 bundle->stream_update.stream = acrtc_state->stream;
9209 if (new_pcrtc_state->mode_changed) {
9210 bundle->stream_update.src = acrtc_state->stream->src;
9211 bundle->stream_update.dst = acrtc_state->stream->dst;
9214 if (new_pcrtc_state->color_mgmt_changed) {
9216 * TODO: This isn't fully correct since we've actually
9217 * already modified the stream in place.
9219 bundle->stream_update.gamut_remap =
9220 &acrtc_state->stream->gamut_remap_matrix;
9221 bundle->stream_update.output_csc_transform =
9222 &acrtc_state->stream->csc_color_matrix;
9223 bundle->stream_update.out_transfer_func =
9224 acrtc_state->stream->out_transfer_func;
9227 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9228 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9229 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9232 * If FreeSync state on the stream has changed then we need to
9233 * re-adjust the min/max bounds now that DC doesn't handle this
9234 * as part of commit.
9236 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9237 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9238 dc_stream_adjust_vmin_vmax(
9239 dm->dc, acrtc_state->stream,
9240 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9241 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9243 mutex_lock(&dm->dc_lock);
9244 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9245 acrtc_state->stream->link->psr_settings.psr_allow_active)
9246 amdgpu_dm_psr_disable(acrtc_state->stream);
9248 dc_commit_updates_for_stream(dm->dc,
9249 bundle->surface_updates,
9251 acrtc_state->stream,
9252 &bundle->stream_update,
9256 * Enable or disable the interrupts on the backend.
9258 * Most pipes are put into power gating when unused.
9260 * When power gating is enabled on a pipe we lose the
9261 * interrupt enablement state when power gating is disabled.
9263 * So we need to update the IRQ control state in hardware
9264 * whenever the pipe turns on (since it could be previously
9265 * power gated) or off (since some pipes can't be power gated
9268 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9269 dm_update_pflip_irq_state(drm_to_adev(dev),
9272 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9273 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9274 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9275 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9277 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9278 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9279 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9280 struct amdgpu_dm_connector *aconn =
9281 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9283 if (aconn->psr_skip_count > 0)
9284 aconn->psr_skip_count--;
9286 /* Allow PSR when skip count is 0. */
9287 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9289 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9292 mutex_unlock(&dm->dc_lock);
9296 * Update cursor state *after* programming all the planes.
9297 * This avoids redundant programming in the case where we're going
9298 * to be disabling a single plane - those pipes are being disabled.
9300 if (acrtc_state->active_planes)
9301 amdgpu_dm_commit_cursors(state);
/*
 * amdgpu_dm_commit_audio - propagate audio instance changes for a commit.
 * @dev: DRM device being committed
 * @state: atomic state that was just applied
 *
 * Two passes over the connector states:
 *  1) removals  - any connector whose CRTC binding changed has its cached
 *     audio instance invalidated (set to -1) and the audio driver is
 *     notified with the old instance;
 *  2) additions - any connector that went through a modeset and now has an
 *     active DC stream gets the audio instance reported by DC, and the
 *     audio driver is notified so it can pick up the new ELD.
 *
 * Reads/writes of aconnector->audio_inst are serialized by
 * adev->dm.audio_lock; the notify itself happens outside the lock.
 */
9307 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9308 struct drm_atomic_state *state)
9310 struct amdgpu_device *adev = drm_to_adev(dev);
9311 struct amdgpu_dm_connector *aconnector;
9312 struct drm_connector *connector;
9313 struct drm_connector_state *old_con_state, *new_con_state;
9314 struct drm_crtc_state *new_crtc_state;
9315 struct dm_crtc_state *new_dm_crtc_state;
9316 const struct dc_stream_status *status;
9319 /* Notify device removals. */
9320 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9321 if (old_con_state->crtc != new_con_state->crtc) {
9322 /* CRTC changes require notification. */
9326 if (!new_con_state->crtc)
9329 new_crtc_state = drm_atomic_get_new_crtc_state(
9330 state, new_con_state->crtc)
9332 if (!new_crtc_state)
9335 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9339 aconnector = to_amdgpu_dm_connector(connector);
/* Atomically fetch and invalidate the cached audio instance. */
9341 mutex_lock(&adev->dm.audio_lock);
9342 inst = aconnector->audio_inst;
9343 aconnector->audio_inst = -1;
9344 mutex_unlock(&adev->dm.audio_lock);
/* Tell the audio side the old instance/ELD is no longer valid. */
9346 amdgpu_dm_audio_eld_notify(adev, inst);
9349 /* Notify audio device additions. */
9350 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9351 if (!new_con_state->crtc)
9354 new_crtc_state = drm_atomic_get_new_crtc_state(
9355 state, new_con_state->crtc)
9357 if (!new_crtc_state)
9360 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9363 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9364 if (!new_dm_crtc_state->stream)
/* DC's stream status carries the hardware audio instance. */
9367 status = dc_stream_get_status(new_dm_crtc_state->stream);
9371 aconnector = to_amdgpu_dm_connector(connector);
/* Cache the new audio instance under the audio lock... */
9373 mutex_lock(&adev->dm.audio_lock);
9374 inst = status->audio_inst;
9375 aconnector->audio_inst = inst;
9376 mutex_unlock(&adev->dm.audio_lock);
/* ...then notify so the new ELD is fetched. */
9378 amdgpu_dm_audio_eld_notify(adev, inst);
9383 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9384 * @crtc_state: the DRM CRTC state
9385 * @stream_state: the DC stream state.
9387 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9388 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9390 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9391 struct dc_stream_state *stream_state)
9393 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9397 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9398 * @state: The atomic state to commit
9400 * This will tell DC to commit the constructed DC state from atomic_check,
9401 * programming the hardware. Any failures here implies a hardware failure, since
9402 * atomic check should have filtered anything non-kosher.
/*
 * Phases of the commit tail below, in order:
 *  1) pick up (or synthesize) the DC state to commit;
 *  2) disable interrupts/streams on CRTCs being turned off or modeset;
 *  3) program the global DC state (dc_commit_state) and re-enable PSR/idle
 *     optimizations as appropriate;
 *  4) per-connector fast updates (scaling, ABM, HDR metadata) that do not
 *     need a modeset;
 *  5) re-enable interrupts and CRC capture on newly enabled CRTCs;
 *  6) flip planes, update audio, restore backlight, send vblank events,
 *     signal hw_done and drop runtime-PM references.
 */
9404 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9406 struct drm_device *dev = state->dev;
9407 struct amdgpu_device *adev = drm_to_adev(dev);
9408 struct amdgpu_display_manager *dm = &adev->dm;
9409 struct dm_atomic_state *dm_state;
9410 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9412 struct drm_crtc *crtc;
9413 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9414 unsigned long flags;
9415 bool wait_for_vblank = true;
9416 struct drm_connector *connector;
9417 struct drm_connector_state *old_con_state, *new_con_state;
9418 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9419 int crtc_disable_count = 0;
9420 bool mode_set_reset_required = false;
9422 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9424 drm_atomic_helper_update_legacy_modeset_state(dev, state);
/* Use the DC context built during atomic_check when one exists. */
9426 dm_state = dm_atomic_get_new_state(state);
9427 if (dm_state && dm_state->context) {
9428 dc_state = dm_state->context;
9430 /* No state changes, retain current state. */
9431 dc_state_temp = dc_create_state(dm->dc);
9432 ASSERT(dc_state_temp);
9433 dc_state = dc_state_temp;
9434 dc_resource_state_copy_construct_current(dm->dc, dc_state);
/*
 * Disable interrupts and drop the stream reference for every CRTC that
 * is going inactive or through a full modeset, before HW programming.
 */
9437 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9438 new_crtc_state, i) {
9439 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9441 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9443 if (old_crtc_state->active &&
9444 (!new_crtc_state->active ||
9445 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9446 manage_dm_interrupts(adev, acrtc, false);
9447 dc_stream_release(dm_old_crtc_state->stream);
9451 drm_atomic_helper_calc_timestamping_constants(state);
9453 /* update changed items */
9454 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9455 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9457 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9458 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9461 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9462 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9463 "connectors_changed:%d\n",
9465 new_crtc_state->enable,
9466 new_crtc_state->active,
9467 new_crtc_state->planes_changed,
9468 new_crtc_state->mode_changed,
9469 new_crtc_state->active_changed,
9470 new_crtc_state->connectors_changed);
9472 /* Disable cursor if disabling crtc */
9473 if (old_crtc_state->active && !new_crtc_state->active) {
9474 struct dc_cursor_position position;
9476 memset(&position, 0, sizeof(position));
9477 mutex_lock(&dm->dc_lock);
9478 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9479 mutex_unlock(&dm->dc_lock);
9482 /* Copy all transient state flags into dc state */
9483 if (dm_new_crtc_state->stream) {
9484 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9485 dm_new_crtc_state->stream);
9488 /* handles headless hotplug case, updating new_state and
9489 * aconnector as needed
9492 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9494 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9496 if (!dm_new_crtc_state->stream) {
9498 * this could happen because of issues with
9499 * userspace notifications delivery.
9500 * In this case userspace tries to set mode on
9501 * display which is disconnected in fact.
9502 * dc_sink is NULL in this case on aconnector.
9503 * We expect reset mode will come soon.
9505 * This can also happen when unplug is done
9506 * during resume sequence ended
9508 * In this case, we want to pretend we still
9509 * have a sink to keep the pipe running so that
9510 * hw state is consistent with the sw state
9512 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9513 __func__, acrtc->base.base.id);
9517 if (dm_old_crtc_state->stream)
9518 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
/* Hold a runtime-PM reference while the CRTC is lit; dropped on disable. */
9520 pm_runtime_get_noresume(dev->dev);
9522 acrtc->enabled = true;
9523 acrtc->hw_mode = new_crtc_state->mode;
9524 crtc->hwmode = new_crtc_state->mode;
9525 mode_set_reset_required = true;
9526 } else if (modereset_required(new_crtc_state)) {
9527 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9528 /* i.e. reset mode */
9529 if (dm_old_crtc_state->stream)
9530 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9532 mode_set_reset_required = true;
9534 } /* for_each_crtc_in_state() */
9537 /* if there mode set or reset, disable eDP PSR */
9538 if (mode_set_reset_required) {
9539 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Drain deferred vblank work so it cannot race the PSR disable below. */
9540 if (dm->vblank_control_workqueue)
9541 flush_workqueue(dm->vblank_control_workqueue);
9543 amdgpu_dm_psr_disable_all(dm);
9546 dm_enable_per_frame_crtc_master_sync(dc_state);
9547 mutex_lock(&dm->dc_lock);
/* Program the full DC state; failure here implies a HW problem. */
9548 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9549 #if defined(CONFIG_DRM_AMD_DC_DCN)
9550 /* Allow idle optimization when vblank count is 0 for display off */
9551 if (dm->active_vblank_irq_count == 0)
9552 dc_allow_idle_optimizations(dm->dc,true);
9554 mutex_unlock(&dm->dc_lock);
/* Record which OTG each CRTC ended up on after the commit. */
9557 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9558 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9560 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9562 if (dm_new_crtc_state->stream != NULL) {
9563 const struct dc_stream_status *status =
9564 dc_stream_get_status(dm_new_crtc_state->stream);
9567 status = dc_stream_get_status_from_state(dc_state,
9568 dm_new_crtc_state->stream);
9570 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9572 acrtc->otg_inst = status->primary_otg_inst;
/* HDCP: re-arm content protection after streams were torn down/rebuilt. */
9575 #ifdef CONFIG_DRM_AMD_DC_HDCP
9576 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9577 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9578 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9579 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9581 new_crtc_state = NULL;
9584 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9586 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
/* Stream gone while HDCP enabled: drop to DESIRED and re-negotiate later. */
9588 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9589 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9590 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9591 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9592 dm_new_con_state->update_hdcp = true;
9596 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9597 hdcp_update_display(
9598 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9599 new_con_state->hdcp_content_type,
9600 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9604 /* Handle connector state changes */
9605 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9606 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9607 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9608 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9609 struct dc_surface_update dummy_updates[MAX_SURFACES];
9610 struct dc_stream_update stream_update;
9611 struct dc_info_packet hdr_packet;
9612 struct dc_stream_status *status = NULL;
9613 bool abm_changed, hdr_changed, scaling_changed;
9615 memset(&dummy_updates, 0, sizeof(dummy_updates));
9616 memset(&stream_update, 0, sizeof(stream_update));
9619 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9620 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9623 /* Skip any modesets/resets */
9624 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9627 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9628 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9630 scaling_changed = is_scaling_state_different(dm_new_con_state,
9633 abm_changed = dm_new_crtc_state->abm_level !=
9634 dm_old_crtc_state->abm_level;
9637 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
/* Nothing relevant changed on this connector: skip the stream update. */
9639 if (!scaling_changed && !abm_changed && !hdr_changed)
9642 stream_update.stream = dm_new_crtc_state->stream;
9643 if (scaling_changed) {
9644 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9645 dm_new_con_state, dm_new_crtc_state->stream);
9647 stream_update.src = dm_new_crtc_state->stream->src;
9648 stream_update.dst = dm_new_crtc_state->stream->dst;
9652 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9654 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9658 fill_hdr_info_packet(new_con_state, &hdr_packet);
9659 stream_update.hdr_static_metadata = &hdr_packet;
9662 status = dc_stream_get_status(dm_new_crtc_state->stream);
9664 if (WARN_ON(!status))
9667 WARN_ON(!status->plane_count);
9670 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9671 * Here we create an empty update on each plane.
9672 * To fix this, DC should permit updating only stream properties.
9674 for (j = 0; j < status->plane_count; j++)
9675 dummy_updates[j].surface = status->plane_states[0];
9678 mutex_lock(&dm->dc_lock);
9679 dc_commit_updates_for_stream(dm->dc,
9681 status->plane_count,
9682 dm_new_crtc_state->stream,
9685 mutex_unlock(&dm->dc_lock);
9688 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9689 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9690 new_crtc_state, i) {
9691 if (old_crtc_state->active && !new_crtc_state->active)
9692 crtc_disable_count++;
9694 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9695 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9697 /* For freesync config update on crtc state and params for irq */
9698 update_stream_irq_parameters(dm, dm_new_crtc_state);
9700 /* Handle vrr on->off / off->on transitions */
9701 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9706 * Enable interrupts for CRTCs that are newly enabled or went through
9707 * a modeset. It was intentionally deferred until after the front end
9708 * state was modified to wait until the OTG was on and so the IRQ
9709 * handlers didn't access stale or invalid state.
9711 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9712 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9713 #ifdef CONFIG_DEBUG_FS
9714 bool configure_crc = false;
9715 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9716 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9717 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
/* Snapshot the CRC source under event_lock; IRQ handlers touch it too. */
9719 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9720 cur_crc_src = acrtc->dm_irq_params.crc_src;
9721 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9723 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9725 if (new_crtc_state->active &&
9726 (!old_crtc_state->active ||
9727 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/* Reference held for the IRQ params; released when the CRTC disables. */
9728 dc_stream_retain(dm_new_crtc_state->stream);
9729 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9730 manage_dm_interrupts(adev, acrtc, true);
9732 #ifdef CONFIG_DEBUG_FS
9734 * Frontend may have changed so reapply the CRC capture
9735 * settings for the stream.
9737 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9739 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9740 configure_crc = true;
9741 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9742 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9743 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9744 acrtc->dm_irq_params.crc_window.update_win = true;
9745 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9746 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9747 crc_rd_wrk->crtc = crtc;
9748 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9749 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9755 if (amdgpu_dm_crtc_configure_crc_source(
9756 crtc, dm_new_crtc_state, cur_crc_src))
9757 DRM_DEBUG_DRIVER("Failed to configure crc source");
/* Any async flip in the commit disables the target-vblank wait. */
9762 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9763 if (new_crtc_state->async_flip)
9764 wait_for_vblank = false;
9766 /* update planes when needed per crtc*/
9767 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9768 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9770 if (dm_new_crtc_state->stream)
9771 amdgpu_dm_commit_planes(state, dc_state, dev,
9772 dm, crtc, wait_for_vblank);
9775 /* Update audio instances for each connector. */
9776 amdgpu_dm_commit_audio(dev, state);
9778 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9779 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9780 /* restore the backlight level */
9781 for (i = 0; i < dm->num_of_edps; i++) {
9782 if (dm->backlight_dev[i] &&
9783 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9784 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9788 * send vblank event on all events not handled in flip and
9789 * mark consumed event for drm_atomic_helper_commit_hw_done
9791 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9792 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9794 if (new_crtc_state->event)
9795 drm_send_event_locked(dev, &new_crtc_state->event->base);
9797 new_crtc_state->event = NULL;
9799 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9801 /* Signal HW programming completion */
9802 drm_atomic_helper_commit_hw_done(state);
9804 if (wait_for_vblank)
9805 drm_atomic_helper_wait_for_flip_done(dev, state);
9807 drm_atomic_helper_cleanup_planes(dev, state);
9809 /* return the stolen vga memory back to VRAM */
9810 if (!adev->mman.keep_stolen_vga_memory)
9811 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9812 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9815 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9816 * so we can put the GPU into runtime suspend if we're not driving any
9819 for (i = 0; i < crtc_disable_count; i++)
9820 pm_runtime_put_autosuspend(dev->dev);
9821 pm_runtime_mark_last_busy(dev->dev);
/* Release the synthesized DC state, if we created one above. */
9824 dc_release_state(dc_state_temp);
/*
 * dm_force_atomic_commit - kick an atomic commit from inside the driver.
 * @connector: connector whose current CRTC/plane setup should be re-applied
 *
 * Builds a minimal atomic state (connector + its CRTC + primary plane),
 * marks the CRTC mode_changed to force a full restore, and commits it.
 * Used when no usermode compositor can be relied on to re-light the
 * display (see dm_restore_drm_connector_state).
 *
 * NOTE(review): assumes connector->encoder and encoder->crtc are non-NULL;
 * callers are expected to guarantee that - confirm at call sites.
 */
9828 static int dm_force_atomic_commit(struct drm_connector *connector)
9831 struct drm_device *ddev = connector->dev;
9832 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9833 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9834 struct drm_plane *plane = disconnected_acrtc->base.primary;
9835 struct drm_connector_state *conn_state;
9836 struct drm_crtc_state *crtc_state;
9837 struct drm_plane_state *plane_state;
/* Reuse the acquire context already held by the caller's locking. */
9842 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9844 /* Construct an atomic state to restore previous display setting */
9847 * Attach connectors to drm_atomic_state
9849 conn_state = drm_atomic_get_connector_state(state, connector);
9851 ret = PTR_ERR_OR_ZERO(conn_state);
9855 /* Attach crtc to drm_atomic_state*/
9856 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9858 ret = PTR_ERR_OR_ZERO(crtc_state);
9862 /* force a restore */
9863 crtc_state->mode_changed = true;
9865 /* Attach plane to drm_atomic_state */
9866 plane_state = drm_atomic_get_plane_state(state, plane);
9868 ret = PTR_ERR_OR_ZERO(plane_state);
9872 /* Call commit internally with the state we just constructed */
9873 ret = drm_atomic_commit(state);
/* Drop our reference; the commit machinery holds its own while needed. */
9876 drm_atomic_state_put(state);
9878 DRM_ERROR("Restoring old state failed with %i\n", ret);
9884 * This function handles all cases when set mode does not come upon hotplug.
9885 * This includes when a display is unplugged then plugged back into the
9886 * same port and when running without usermode desktop manager support
9888 void dm_restore_drm_connector_state(struct drm_device *dev,
9889 struct drm_connector *connector)
9891 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9892 struct amdgpu_crtc *disconnected_acrtc;
9893 struct dm_crtc_state *acrtc_state;
9895 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9898 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9899 if (!disconnected_acrtc)
9902 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9903 if (!acrtc_state->stream)
9907 * If the previous sink is not released and different from the current,
9908 * we deduce we are in a state where we can not rely on usermode call
9909 * to turn on the display, so we do it here
9911 if (acrtc_state->stream->sink != aconnector->dc_sink)
9912 dm_force_atomic_commit(&aconnector->base);
9916 * Grabs all modesetting locks to serialize against any blocking commits,
9917 * Waits for completion of all non blocking commits.
 *
 * Note: the name's "aquire" spelling is kept for the existing callers.
 * Returns 0 on success or a negative errno from the lock/wait calls.
9919 static int do_aquire_global_lock(struct drm_device *dev,
9920 struct drm_atomic_state *state)
9922 struct drm_crtc *crtc;
9923 struct drm_crtc_commit *commit;
9927 * Adding all modeset locks to acquire_ctx will
9928 * ensure that when the framework releases it the
9929 * extra locks we are locking here will get released too
9931 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
/*
 * For each CRTC, peek at the newest pending commit under commit_lock and
 * take a reference so it cannot be freed while we wait on it below.
 */
9935 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9936 spin_lock(&crtc->commit_lock);
9937 commit = list_first_entry_or_null(&crtc->commit_list,
9938 struct drm_crtc_commit, commit_entry);
9940 drm_crtc_commit_get(commit);
9941 spin_unlock(&crtc->commit_lock);
9947 * Make sure all pending HW programming completed and
 * page flips are done before we proceed (10s timeout each).
9950 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9953 ret = wait_for_completion_interruptible_timeout(
9954 &commit->flip_done, 10*HZ);
9957 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9958 "timed out\n", crtc->base.id, crtc->name);
9960 drm_crtc_commit_put(commit);
/* Interruptible waits return <0 on signal; map timeouts/success to 0. */
9963 return ret < 0 ? ret : 0;
/*
 * get_freesync_config_for_crtc - derive the VRR (FreeSync) config for a CRTC.
 * @new_crtc_state: the new DM CRTC state to fill in
 * @new_con_state: the new DM connector state driving this CRTC
 *
 * Marks the CRTC vrr_supported when the connector reports FreeSync
 * capability and the mode's refresh rate falls inside the panel's
 * [min_vfreq, max_vfreq] range, then builds the mod_freesync_config:
 * fixed-rate mode when the previous state was ACTIVE_FIXED, variable
 * when userspace enabled VRR on the CRTC, inactive otherwise.
 */
9966 static void get_freesync_config_for_crtc(
9967 struct dm_crtc_state *new_crtc_state,
9968 struct dm_connector_state *new_con_state)
9970 struct mod_freesync_config config = {0};
9971 struct amdgpu_dm_connector *aconnector =
9972 to_amdgpu_dm_connector(new_con_state->base.connector);
9973 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9974 int vrefresh = drm_mode_vrefresh(mode);
9975 bool fs_vid_mode = false;
9977 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9978 vrefresh >= aconnector->min_vfreq &&
9979 vrefresh <= aconnector->max_vfreq;
9981 if (new_crtc_state->vrr_supported) {
/* VRR streams must ignore MSA timing so the sink tracks the actual rate. */
9982 new_crtc_state->stream->ignore_msa_timing_param = true;
9983 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
/* Panel range in uHz: min/max_vfreq are in Hz. */
9985 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9986 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9987 config.vsif_supported = true;
/* Fixed-rate freesync video mode: carry over the previously chosen rate. */
9991 config.state = VRR_STATE_ACTIVE_FIXED;
9992 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9994 } else if (new_crtc_state->base.vrr_enabled) {
9995 config.state = VRR_STATE_ACTIVE_VARIABLE;
9997 config.state = VRR_STATE_INACTIVE;
10001 new_crtc_state->freesync_config = config;
10004 static void reset_freesync_config_for_crtc(
10005 struct dm_crtc_state *new_crtc_state)
10007 new_crtc_state->vrr_supported = false;
10009 memset(&new_crtc_state->vrr_infopacket, 0,
10010 sizeof(new_crtc_state->vrr_infopacket));
10014 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10015 struct drm_crtc_state *new_crtc_state)
10017 struct drm_display_mode old_mode, new_mode;
10019 if (!old_crtc_state || !new_crtc_state)
10022 old_mode = old_crtc_state->mode;
10023 new_mode = new_crtc_state->mode;
10025 if (old_mode.clock == new_mode.clock &&
10026 old_mode.hdisplay == new_mode.hdisplay &&
10027 old_mode.vdisplay == new_mode.vdisplay &&
10028 old_mode.htotal == new_mode.htotal &&
10029 old_mode.vtotal != new_mode.vtotal &&
10030 old_mode.hsync_start == new_mode.hsync_start &&
10031 old_mode.vsync_start != new_mode.vsync_start &&
10032 old_mode.hsync_end == new_mode.hsync_end &&
10033 old_mode.vsync_end != new_mode.vsync_end &&
10034 old_mode.hskew == new_mode.hskew &&
10035 old_mode.vscan == new_mode.vscan &&
10036 (old_mode.vsync_end - old_mode.vsync_start) ==
10037 (new_mode.vsync_end - new_mode.vsync_start))
10043 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10044 uint64_t num, den, res;
10045 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10047 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10049 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10050 den = (unsigned long long)new_crtc_state->mode.htotal *
10051 (unsigned long long)new_crtc_state->mode.vtotal;
10053 res = div_u64(num, den);
10054 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10057 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10058 struct drm_atomic_state *state,
10059 struct drm_crtc *crtc,
10060 struct drm_crtc_state *old_crtc_state,
10061 struct drm_crtc_state *new_crtc_state,
10063 bool *lock_and_validation_needed)
10065 struct dm_atomic_state *dm_state = NULL;
10066 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10067 struct dc_stream_state *new_stream;
10071 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10072 * update changed items
10074 struct amdgpu_crtc *acrtc = NULL;
10075 struct amdgpu_dm_connector *aconnector = NULL;
10076 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10077 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10081 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10082 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10083 acrtc = to_amdgpu_crtc(crtc);
10084 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10086 /* TODO This hack should go away */
10087 if (aconnector && enable) {
10088 /* Make sure fake sink is created in plug-in scenario */
10089 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10090 &aconnector->base);
10091 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10092 &aconnector->base);
10094 if (IS_ERR(drm_new_conn_state)) {
10095 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10099 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10100 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10102 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10105 new_stream = create_validate_stream_for_sink(aconnector,
10106 &new_crtc_state->mode,
10108 dm_old_crtc_state->stream);
10111 * we can have no stream on ACTION_SET if a display
10112 * was disconnected during S3, in this case it is not an
10113 * error, the OS will be updated after detection, and
10114 * will do the right thing on next atomic commit
10118 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10119 __func__, acrtc->base.base.id);
10125 * TODO: Check VSDB bits to decide whether this should
10126 * be enabled or not.
10128 new_stream->triggered_crtc_reset.enabled =
10129 dm->force_timing_sync;
10131 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10133 ret = fill_hdr_info_packet(drm_new_conn_state,
10134 &new_stream->hdr_static_metadata);
10139 * If we already removed the old stream from the context
10140 * (and set the new stream to NULL) then we can't reuse
10141 * the old stream even if the stream and scaling are unchanged.
10142 * We'll hit the BUG_ON and black screen.
10144 * TODO: Refactor this function to allow this check to work
10145 * in all conditions.
10147 if (amdgpu_freesync_vid_mode &&
10148 dm_new_crtc_state->stream &&
10149 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10152 if (dm_new_crtc_state->stream &&
10153 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10154 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10155 new_crtc_state->mode_changed = false;
10156 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10157 new_crtc_state->mode_changed);
10161 /* mode_changed flag may get updated above, need to check again */
10162 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10166 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10167 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10168 "connectors_changed:%d\n",
10170 new_crtc_state->enable,
10171 new_crtc_state->active,
10172 new_crtc_state->planes_changed,
10173 new_crtc_state->mode_changed,
10174 new_crtc_state->active_changed,
10175 new_crtc_state->connectors_changed);
10177 /* Remove stream for any changed/disabled CRTC */
10180 if (!dm_old_crtc_state->stream)
10183 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10184 is_timing_unchanged_for_freesync(new_crtc_state,
10186 new_crtc_state->mode_changed = false;
10188 "Mode change not required for front porch change, "
10189 "setting mode_changed to %d",
10190 new_crtc_state->mode_changed);
10192 set_freesync_fixed_config(dm_new_crtc_state);
10195 } else if (amdgpu_freesync_vid_mode && aconnector &&
10196 is_freesync_video_mode(&new_crtc_state->mode,
10198 struct drm_display_mode *high_mode;
10200 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10201 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10202 set_freesync_fixed_config(dm_new_crtc_state);
10206 ret = dm_atomic_get_state(state, &dm_state);
10210 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10213 /* i.e. reset mode */
10214 if (dc_remove_stream_from_ctx(
10217 dm_old_crtc_state->stream) != DC_OK) {
10222 dc_stream_release(dm_old_crtc_state->stream);
10223 dm_new_crtc_state->stream = NULL;
10225 reset_freesync_config_for_crtc(dm_new_crtc_state);
10227 *lock_and_validation_needed = true;
10229 } else {/* Add stream for any updated/enabled CRTC */
10231 * Quick fix to prevent NULL pointer on new_stream when
10232 * added MST connectors not found in existing crtc_state in the chained mode
10233 * TODO: need to dig out the root cause of that
10235 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10238 if (modereset_required(new_crtc_state))
10241 if (modeset_required(new_crtc_state, new_stream,
10242 dm_old_crtc_state->stream)) {
10244 WARN_ON(dm_new_crtc_state->stream);
10246 ret = dm_atomic_get_state(state, &dm_state);
10250 dm_new_crtc_state->stream = new_stream;
10252 dc_stream_retain(new_stream);
10254 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10257 if (dc_add_stream_to_ctx(
10260 dm_new_crtc_state->stream) != DC_OK) {
10265 *lock_and_validation_needed = true;
10270 /* Release extra reference */
10272 dc_stream_release(new_stream);
10275 * We want to do dc stream updates that do not require a
10276 * full modeset below.
10278 if (!(enable && aconnector && new_crtc_state->active))
10281 * Given above conditions, the dc state cannot be NULL because:
10282 * 1. We're in the process of enabling CRTCs (just been added
10283 * to the dc context, or already is on the context)
10284 * 2. Has a valid connector attached, and
10285 * 3. Is currently active and enabled.
10286 * => The dc stream state currently exists.
10288 BUG_ON(dm_new_crtc_state->stream == NULL);
10290 /* Scaling or underscan settings */
10291 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10292 drm_atomic_crtc_needs_modeset(new_crtc_state))
10293 update_stream_scaling_settings(
10294 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10297 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10300 * Color management settings. We also update color properties
10301 * when a modeset is needed, to ensure it gets reprogrammed.
10303 if (dm_new_crtc_state->base.color_mgmt_changed ||
10304 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10305 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10310 /* Update Freesync settings. */
10311 get_freesync_config_for_crtc(dm_new_crtc_state,
10312 dm_new_conn_state);
10318 dc_stream_release(new_stream);
/*
 * should_reset_plane() - decide whether DC planes must be removed and
 * recreated for this atomic update rather than updated in place.
 *
 * Any change on the plane's CRTC that can affect z-order, pipe
 * acquisition or bandwidth — modeset, color management, src/dst size or
 * scaling, rotation, blending mode, alpha, colorspace, pixel format, or
 * tiling/DCC flags on any non-cursor plane sharing the CRTC — forces a
 * full plane reset (true).  Otherwise the plane can be fast-updated.
 */
10322 static bool should_reset_plane(struct drm_atomic_state *state,
10323 struct drm_plane *plane,
10324 struct drm_plane_state *old_plane_state,
10325 struct drm_plane_state *new_plane_state)
10327 struct drm_plane *other;
10328 struct drm_plane_state *old_other_state, *new_other_state;
10329 struct drm_crtc_state *new_crtc_state;
10333 * TODO: Remove this hack once the checks below are sufficient
10334 * enough to determine when we need to reset all the planes on
10337 if (state->allow_modeset)
10340 /* Exit early if we know that we're adding or removing the plane. */
10341 if (old_plane_state->crtc != new_plane_state->crtc)
10344 /* old crtc == new_crtc == NULL, plane not in context. */
10345 if (!new_plane_state->crtc)
10349 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10351 if (!new_crtc_state)
10354 /* CRTC Degamma changes currently require us to recreate planes. */
10355 if (new_crtc_state->color_mgmt_changed)
10358 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10362 * If there are any new primary or overlay planes being added or
10363 * removed then the z-order can potentially change. To ensure
10364 * correct z-order and pipe acquisition the current DC architecture
10365 * requires us to remove and recreate all existing planes.
10367 * TODO: Come up with a more elegant solution for this.
10369 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10370 struct amdgpu_framebuffer *old_afb, *new_afb;
/* Cursor planes never force a reset; skip planes on unrelated CRTCs. */
10371 if (other->type == DRM_PLANE_TYPE_CURSOR)
10374 if (old_other_state->crtc != new_plane_state->crtc &&
10375 new_other_state->crtc != new_plane_state->crtc)
10378 if (old_other_state->crtc != new_other_state->crtc)
10381 /* Src/dst size and scaling updates. */
10382 if (old_other_state->src_w != new_other_state->src_w ||
10383 old_other_state->src_h != new_other_state->src_h ||
10384 old_other_state->crtc_w != new_other_state->crtc_w ||
10385 old_other_state->crtc_h != new_other_state->crtc_h)
10388 /* Rotation / mirroring updates. */
10389 if (old_other_state->rotation != new_other_state->rotation)
10392 /* Blending updates. */
10393 if (old_other_state->pixel_blend_mode !=
10394 new_other_state->pixel_blend_mode)
10397 /* Alpha updates. */
10398 if (old_other_state->alpha != new_other_state->alpha)
10401 /* Colorspace changes. */
10402 if (old_other_state->color_range != new_other_state->color_range ||
10403 old_other_state->color_encoding != new_other_state->color_encoding)
10406 /* Framebuffer checks fall at the end. */
10407 if (!old_other_state->fb || !new_other_state->fb)
10410 /* Pixel format changes can require bandwidth updates. */
10411 if (old_other_state->fb->format != new_other_state->fb->format)
10414 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10415 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10417 /* Tiling and DCC changes also require bandwidth updates. */
10418 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10419 old_afb->base.modifier != new_afb->base.modifier)
/*
 * dm_check_cursor_fb() - validate a framebuffer for use on the cursor
 * plane of @new_acrtc.
 *
 * Rejects (with a drm_dbg_atomic message) FBs that exceed the hardware
 * cursor dimensions, are cropped/scaled (src size must equal FB size in
 * 16.16 fixed point), have a pitch that differs from the FB width or is
 * otherwise unsupported, or are non-linearly tiled when no DRM modifier
 * is present.  Presumably returns 0 on success and a negative error
 * code otherwise (return statements follow DRM convention — confirm in
 * the full source).
 */
10426 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10427 struct drm_plane_state *new_plane_state,
10428 struct drm_framebuffer *fb)
10430 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10431 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10432 unsigned int pitch;
10435 if (fb->width > new_acrtc->max_cursor_width ||
10436 fb->height > new_acrtc->max_cursor_height) {
10437 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10438 new_plane_state->fb->width,
10439 new_plane_state->fb->height);
10442 if (new_plane_state->src_w != fb->width << 16 ||
10443 new_plane_state->src_h != fb->height << 16) {
10444 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10448 /* Pitch in pixels */
10449 pitch = fb->pitches[0] / fb->format->cpp[0];
10451 if (fb->width != pitch) {
10452 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10461 /* FB pitch is supported by cursor plane */
10464 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10468 /* Core DRM takes care of checking FB modifiers, so we only need to
10469 * check tiling flags when the FB doesn't have a modifier. */
10470 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
/* Pre-AI (GFX8 and older) ASICs encode tiling differently from GFX9+. */
10471 if (adev->family < AMDGPU_FAMILY_AI) {
10472 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10473 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10474 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10476 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10479 DRM_DEBUG_ATOMIC("Cursor FB not linear");
/*
 * dm_update_plane_state() - per-plane half of the DM atomic check.
 *
 * Depending on the (not visible here) "enable" direction, either removes
 * a changed/disabled plane's dc_plane_state from the DC context, or
 * creates a new dc_plane_state, fills its attributes from the DRM plane
 * state and adds it to the stream's context.  Cursor planes are only
 * sanity-checked (no cropping, valid FB) since DC has no dedicated
 * cursor plane.  Sets *lock_and_validation_needed when the DC context
 * was modified so the caller runs global validation.
 */
10487 static int dm_update_plane_state(struct dc *dc,
10488 struct drm_atomic_state *state,
10489 struct drm_plane *plane,
10490 struct drm_plane_state *old_plane_state,
10491 struct drm_plane_state *new_plane_state,
10493 bool *lock_and_validation_needed)
10496 struct dm_atomic_state *dm_state = NULL;
10497 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10498 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10499 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10500 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10501 struct amdgpu_crtc *new_acrtc;
10506 new_plane_crtc = new_plane_state->crtc;
10507 old_plane_crtc = old_plane_state->crtc;
10508 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10509 dm_old_plane_state = to_dm_plane_state(old_plane_state);
/* Cursor planes: validate position/FB only, no DC context changes. */
10511 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10512 if (!enable || !new_plane_crtc ||
10513 drm_atomic_plane_disabling(plane->state, new_plane_state))
10516 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10518 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10519 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10523 if (new_plane_state->fb) {
10524 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10525 new_plane_state->fb);
10533 needs_reset = should_reset_plane(state, plane, old_plane_state,
10536 /* Remove any changed/removed planes */
10541 if (!old_plane_crtc)
10544 old_crtc_state = drm_atomic_get_old_crtc_state(
10545 state, old_plane_crtc);
10546 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10548 if (!dm_old_crtc_state->stream)
10551 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10552 plane->base.id, old_plane_crtc->base.id);
10554 ret = dm_atomic_get_state(state, &dm_state);
10558 if (!dc_remove_plane_from_context(
10560 dm_old_crtc_state->stream,
10561 dm_old_plane_state->dc_state,
10562 dm_state->context)) {
/* Drop the DC plane reference now that it's out of the context. */
10568 dc_plane_state_release(dm_old_plane_state->dc_state);
10569 dm_new_plane_state->dc_state = NULL;
10571 *lock_and_validation_needed = true;
10573 } else { /* Add new planes */
10574 struct dc_plane_state *dc_new_plane_state;
10576 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10579 if (!new_plane_crtc)
10582 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10583 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10585 if (!dm_new_crtc_state->stream)
10591 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10595 WARN_ON(dm_new_plane_state->dc_state);
10597 dc_new_plane_state = dc_create_plane_state(dc);
10598 if (!dc_new_plane_state)
10601 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10602 plane->base.id, new_plane_crtc->base.id);
10604 ret = fill_dc_plane_attributes(
10605 drm_to_adev(new_plane_crtc->dev),
10606 dc_new_plane_state,
10610 dc_plane_state_release(dc_new_plane_state);
10614 ret = dm_atomic_get_state(state, &dm_state);
10616 dc_plane_state_release(dc_new_plane_state);
10621 * Any atomic check errors that occur after this will
10622 * not need a release. The plane state will be attached
10623 * to the stream, and therefore part of the atomic
10624 * state. It'll be released when the atomic state is
10627 if (!dc_add_plane_to_context(
10629 dm_new_crtc_state->stream,
10630 dc_new_plane_state,
10631 dm_state->context)) {
10633 dc_plane_state_release(dc_new_plane_state);
10637 dm_new_plane_state->dc_state = dc_new_plane_state;
10639 /* Tell DC to do a full surface update every time there
10640 * is a plane change. Inefficient, but works for now.
10642 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10644 *lock_and_validation_needed = true;
/*
 * dm_check_crtc_cursor() - reject cursor configurations the HW cannot do.
 *
 * DCE/DCN has no dedicated cursor plane: the cursor inherits scaling
 * from the underlying pipe.  Compare the cursor's effective scale
 * (crtc size / src size, in 0.1% units) against each enabled non-cursor
 * plane on the same CRTC, iterating top-down and stopping once a plane
 * fully covers the CRTC.  A mismatch is logged and rejected.
 */
10651 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10652 struct drm_crtc *crtc,
10653 struct drm_crtc_state *new_crtc_state)
10655 struct drm_plane *cursor = crtc->cursor, *underlying;
10656 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10658 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10660 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10661 * cursor per pipe but it's going to inherit the scaling and
10662 * positioning from the underlying pipe. Check the cursor plane's
10663 * blending properties match the underlying planes'. */
10665 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10666 if (!new_cursor_state || !new_cursor_state->fb) {
/* Scale factors in 0.1% units; src_* are 16.16 fixed point. */
10670 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10671 (new_cursor_state->src_w >> 16);
10672 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10673 (new_cursor_state->src_h >> 16);
10675 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10676 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10677 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10680 /* Ignore disabled planes */
10681 if (!new_underlying_state->fb)
10684 underlying_scale_w = new_underlying_state->crtc_w * 1000 /
10685 (new_underlying_state->src_w >> 16);
10686 underlying_scale_h = new_underlying_state->crtc_h * 1000 /
10687 (new_underlying_state->src_h >> 16);
10689 if (cursor_scale_w != underlying_scale_w ||
10690 cursor_scale_h != underlying_scale_h) {
10691 drm_dbg_atomic(crtc->dev,
10692 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10693 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10697 /* If this plane covers the whole CRTC, no need to check planes underneath */
10698 if (new_underlying_state->crtc_x <= 0 &&
10699 new_underlying_state->crtc_y <= 0 &&
10700 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10701 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10708 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * add_affected_mst_dsc_crtcs() - pull CRTCs sharing @crtc's MST/DSC
 * topology into the atomic state.
 *
 * Finds the connector driving @crtc in @state; if it is an MST
 * connector (has both a port and an mst_port), asks the DP MST helper
 * to add every CRTC whose DSC config could be affected by this change.
 */
10709 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10711 struct drm_connector *connector;
10712 struct drm_connector_state *conn_state;
10713 struct amdgpu_dm_connector *aconnector = NULL;
10715 for_each_new_connector_in_state(state, connector, conn_state, i) {
10716 if (conn_state->crtc != crtc)
10719 aconnector = to_amdgpu_dm_connector(connector);
/* Only MST connectors can affect other CRTCs' DSC allocation. */
10720 if (!aconnector->port || !aconnector->mst_port)
10729 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10734 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10735 * @dev: The DRM device
10736 * @state: The atomic state to commit
10738 * Validate that the given atomic state is programmable by DC into hardware.
10739 * This involves constructing a &struct dc_state reflecting the new hardware
10740 * state we wish to commit, then querying DC to see if it is programmable. It's
10741 * important not to modify the existing DC state. Otherwise, atomic_check
10742 * may unexpectedly commit hardware changes.
10744 * When validating the DC state, it's important that the right locks are
10745 * acquired. For a full update, which removes/adds/updates streams on one
10746 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10747 * that any such full update commit will wait for completion of any outstanding
10748 * flip using DRMs synchronization events.
10750 * Note that DM adds the affected connectors for all CRTCs in state, when that
10751 * might not seem necessary. This is because DC stream creation requires the
10752 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10753 * be possible but non-trivial - a possible TODO item.
10755 * Return: -Error code if validation failed.
10757 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10758 struct drm_atomic_state *state)
10760 struct amdgpu_device *adev = drm_to_adev(dev);
10761 struct dm_atomic_state *dm_state = NULL;
10762 struct dc *dc = adev->dm.dc;
10763 struct drm_connector *connector;
10764 struct drm_connector_state *old_con_state, *new_con_state;
10765 struct drm_crtc *crtc;
10766 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10767 struct drm_plane *plane;
10768 struct drm_plane_state *old_plane_state, *new_plane_state;
10769 enum dc_status status;
10771 bool lock_and_validation_needed = false;
10772 struct dm_crtc_state *dm_old_crtc_state;
10773 #if defined(CONFIG_DRM_AMD_DC_DCN)
10774 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10775 struct drm_dp_mst_topology_state *mst_state;
10776 struct drm_dp_mst_topology_mgr *mgr;
10779 trace_amdgpu_dm_atomic_check_begin(state);
10781 ret = drm_atomic_helper_check_modeset(dev, state);
10785 /* Check connector changes */
10786 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10787 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10788 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10790 /* Skip connectors that are disabled or part of modeset already. */
10791 if (!old_con_state->crtc && !new_con_state->crtc)
10794 if (!new_con_state->crtc)
10797 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10798 if (IS_ERR(new_crtc_state)) {
10799 ret = PTR_ERR(new_crtc_state);
/* An ABM (adaptive backlight) level change must reprogram the CRTC. */
10803 if (dm_old_con_state->abm_level !=
10804 dm_new_con_state->abm_level)
10805 new_crtc_state->connectors_changed = true;
10808 #if defined(CONFIG_DRM_AMD_DC_DCN)
10809 if (dc_resource_is_dsc_encoding_supported(dc)) {
10810 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10811 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10812 ret = add_affected_mst_dsc_crtcs(state, crtc);
10819 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10820 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10822 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10823 !new_crtc_state->color_mgmt_changed &&
10824 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10825 dm_old_crtc_state->dsc_force_changed == false)
10828 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10832 if (!new_crtc_state->enable)
10835 ret = drm_atomic_add_affected_connectors(state, crtc);
10839 ret = drm_atomic_add_affected_planes(state, crtc);
10843 if (dm_old_crtc_state->dsc_force_changed)
10844 new_crtc_state->mode_changed = true;
10848 * Add all primary and overlay planes on the CRTC to the state
10849 * whenever a plane is enabled to maintain correct z-ordering
10850 * and to enable fast surface updates.
10852 drm_for_each_crtc(crtc, dev) {
10853 bool modified = false;
10855 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10856 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10859 if (new_plane_state->crtc == crtc ||
10860 old_plane_state->crtc == crtc) {
10869 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10870 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10874 drm_atomic_get_plane_state(state, plane);
10876 if (IS_ERR(new_plane_state)) {
10877 ret = PTR_ERR(new_plane_state);
10883 /* Remove exiting planes if they are modified */
10884 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10885 ret = dm_update_plane_state(dc, state, plane,
10889 &lock_and_validation_needed);
10894 /* Disable all crtcs which require disable */
10895 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10896 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10900 &lock_and_validation_needed);
10905 /* Enable all crtcs which require enable */
10906 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10907 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10911 &lock_and_validation_needed);
10916 /* Add new/modified planes */
10917 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10918 ret = dm_update_plane_state(dc, state, plane,
10922 &lock_and_validation_needed);
10927 /* Run this here since we want to validate the streams we created */
10928 ret = drm_atomic_helper_check_planes(dev, state);
10932 /* Check cursor planes scaling */
10933 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10934 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10939 if (state->legacy_cursor_update) {
10941 * This is a fast cursor update coming from the plane update
10942 * helper, check if it can be done asynchronously for better
10945 state->async_update =
10946 !drm_atomic_helper_async_check(dev, state);
10949 * Skip the remaining global validation if this is an async
10950 * update. Cursor updates can be done without affecting
10951 * state or bandwidth calcs and this avoids the performance
10952 * penalty of locking the private state object and
10953 * allocating a new dc_state.
10955 if (state->async_update)
10959 /* Check scaling and underscan changes*/
10960 /* TODO Removed scaling changes validation due to inability to commit
10961 * new stream into context w\o causing full reset. Need to
10962 * decide how to handle.
10964 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10965 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10966 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10967 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10969 /* Skip any modesets/resets */
10970 if (!acrtc || drm_atomic_crtc_needs_modeset(
10971 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10974 /* Skip any thing not scale or underscan changes */
10975 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10978 lock_and_validation_needed = true;
10981 #if defined(CONFIG_DRM_AMD_DC_DCN)
10982 /* set the slot info for each mst_state based on the link encoding format */
10983 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10984 struct amdgpu_dm_connector *aconnector;
10985 struct drm_connector *connector;
10986 struct drm_connector_list_iter iter;
10987 u8 link_coding_cap;
10989 if (!mgr->mst_state )
10992 drm_connector_list_iter_begin(dev, &iter);
10993 drm_for_each_connector_iter(connector, &iter) {
10994 int id = connector->index;
10996 if (id == mst_state->mgr->conn_base_id) {
10997 aconnector = to_amdgpu_dm_connector(connector);
10998 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10999 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11004 drm_connector_list_iter_end(&iter);
11009 * Streams and planes are reset when there are changes that affect
11010 * bandwidth. Anything that affects bandwidth needs to go through
11011 * DC global validation to ensure that the configuration can be applied
11014 * We have to currently stall out here in atomic_check for outstanding
11015 * commits to finish in this case because our IRQ handlers reference
11016 * DRM state directly - we can end up disabling interrupts too early
11019 * TODO: Remove this stall and drop DM state private objects.
11021 if (lock_and_validation_needed) {
11022 ret = dm_atomic_get_state(state, &dm_state);
11026 ret = do_aquire_global_lock(dev, state);
11030 #if defined(CONFIG_DRM_AMD_DC_DCN)
11031 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
11034 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11040 * Perform validation of MST topology in the state:
11041 * We need to perform MST atomic check before calling
11042 * dc_validate_global_state(), or there is a chance
11043 * to get stuck in an infinite loop and hang eventually.
11045 ret = drm_dp_mst_atomic_check(state);
11048 status = dc_validate_global_state(dc, dm_state->context, false);
11049 if (status != DC_OK) {
11050 drm_dbg_atomic(dev,
11051 "DC global validation failure: %s (%d)",
11052 dc_status_to_str(status), status);
11058 * The commit is a fast update. Fast updates shouldn't change
11059 * the DC context, affect global validation, and can have their
11060 * commit work done in parallel with other commits not touching
11061 * the same resource. If we have a new DC context as part of
11062 * the DM atomic state from validation we need to free it and
11063 * retain the existing one instead.
11065 * Furthermore, since the DM atomic state only contains the DC
11066 * context and can safely be annulled, we can free the state
11067 * and clear the associated private object now to free
11068 * some memory and avoid a possible use-after-free later.
11071 for (i = 0; i < state->num_private_objs; i++) {
11072 struct drm_private_obj *obj = state->private_objs[i].ptr;
11074 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11075 int j = state->num_private_objs-1;
11077 dm_atomic_destroy_state(obj,
11078 state->private_objs[i].state);
11080 /* If i is not at the end of the array then the
11081 * last element needs to be moved to where i was
11082 * before the array can safely be truncated.
11085 state->private_objs[i] =
11086 state->private_objs[j];
11088 state->private_objs[j].ptr = NULL;
11089 state->private_objs[j].state = NULL;
11090 state->private_objs[j].old_state = NULL;
11091 state->private_objs[j].new_state = NULL;
11093 state->num_private_objs = j;
11099 /* Store the overall update type for use later in atomic check. */
11100 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11101 struct dm_crtc_state *dm_new_crtc_state =
11102 to_dm_crtc_state(new_crtc_state);
11104 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11109 /* Must be success */
11112 trace_amdgpu_dm_atomic_check_finish(state, ret);
/* Error path: expected backoff/retry errors get a quieter message. */
11117 if (ret == -EDEADLK)
11118 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11119 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11120 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11122 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11124 trace_amdgpu_dm_atomic_check_finish(state, ret);
/*
 * is_dp_capable_without_timing_msa() - check whether the DP sink ignores
 * MSA timing parameters (a prerequisite for DP Adaptive-Sync/FreeSync).
 *
 * Reads the DP_DOWN_STREAM_PORT_COUNT DPCD register and returns true if
 * the DP_MSA_TIMING_PAR_IGNORED bit is set; false on read failure or
 * missing dc_link.
 */
11129 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11130 struct amdgpu_dm_connector *amdgpu_dm_connector)
11133 bool capable = false;
11135 if (amdgpu_dm_connector->dc_link &&
11136 dm_helpers_dp_read_dpcd(
11138 amdgpu_dm_connector->dc_link,
11139 DP_DOWN_STREAM_PORT_COUNT,
11141 sizeof(dpcd_data))) {
11142 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
/*
 * dm_edid_parser_send_cea() - hand one chunk of an EDID CEA extension
 * block to the DMUB firmware for parsing.
 *
 * Builds a DMUB_CMD__EDID_CEA command carrying up to
 * DMUB_EDID_CEA_DATA_CHUNK_BYTES of @data at @offset within a block of
 * @total_length bytes, sends it synchronously, and interprets the reply:
 * an ACK for intermediate chunks, or an AMD VSDB result filled into
 * @vsdb (freesync support, VSDB version, min/max frame rate).
 * Returns false on transport failure, NACK, or unknown reply type.
 */
11148 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11149 unsigned int offset,
11150 unsigned int total_length,
11152 unsigned int length,
11153 struct amdgpu_hdmi_vsdb_info *vsdb)
11156 union dmub_rb_cmd cmd;
11157 struct dmub_cmd_send_edid_cea *input;
11158 struct dmub_cmd_edid_cea_output *output;
11160 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11163 memset(&cmd, 0, sizeof(cmd));
11165 input = &cmd.edid_cea.data.input;
11167 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11168 cmd.edid_cea.header.sub_type = 0;
11169 cmd.edid_cea.header.payload_bytes =
11170 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11171 input->offset = offset;
11172 input->length = length;
11173 input->total_length = total_length;
11174 memcpy(input->payload, data, length);
11176 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11178 DRM_ERROR("EDID CEA parser failed\n");
11182 output = &cmd.edid_cea.data.output;
11184 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11185 if (!output->ack.success) {
11186 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11187 output->ack.offset);
11189 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11190 if (!output->amd_vsdb.vsdb_found)
11193 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11194 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11195 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11196 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11198 DRM_WARN("Unknown EDID CEA parser results\n");
/*
 * parse_edid_cea_dmcu() - parse a CEA extension block via DMCU firmware.
 *
 * Streams @edid_ext to the DMCU in 8-byte chunks through
 * dc_edid_parser_send_cea(), then polls for either an AMD VSDB result
 * (filled into @vsdb_info) or a plain ACK.  Returns true when an AMD
 * VSDB with freesync data was found.
 */
11205 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11206 uint8_t *edid_ext, int len,
11207 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11211 /* send extension block to DMCU for parsing */
11212 for (i = 0; i < len; i += 8) {
11216 /* send 8 bytes a time */
11217 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11221 /* EDID block sent completed, expect result */
11222 int version, min_rate, max_rate;
11224 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11226 /* amd vsdb found */
11227 vsdb_info->freesync_supported = 1;
11228 vsdb_info->amd_vsdb_version = version;
11229 vsdb_info->min_refresh_rate_hz = min_rate;
11230 vsdb_info->max_refresh_rate_hz = max_rate;
11238 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
/*
 * parse_edid_cea_dmub() - parse a CEA extension block via DMUB firmware.
 *
 * Streams @edid_ext to the DMUB in 8-byte chunks through
 * dm_edid_parser_send_cea(), which fills @vsdb_info when an AMD VSDB is
 * found.  Returns whether freesync support was reported.
 */
11246 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11247 uint8_t *edid_ext, int len,
11248 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11252 /* send extension block to DMUB for parsing */
11253 for (i = 0; i < len; i += 8) {
11254 /* send 8 bytes a time */
11255 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11259 return vsdb_info->freesync_supported;
/*
 * parse_edid_cea() - dispatch CEA extension parsing to the firmware that
 * is present: DMUB when a dmub_srv exists, DMCU otherwise.
 */
11262 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11263 uint8_t *edid_ext, int len,
11264 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11266 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11268 if (adev->dm.dmub_srv)
11269 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11271 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
/*
 * parse_hdmi_amd_vsdb() - locate the CEA extension block in @edid and
 * parse it for an AMD Vendor-Specific Data Block (FreeSync over HDMI).
 *
 * Returns the index of the CEA extension block (for addressing the
 * corresponding detailed timings) when a valid AMD VSDB was found,
 * -ENODEV otherwise; @vsdb_info receives the parsed refresh-rate range.
 */
11274 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11275 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11277 uint8_t *edid_ext = NULL;
11279 bool valid_vsdb_found = false;
11281 /*----- drm_find_cea_extension() -----*/
11282 /* No EDID or EDID extensions */
11283 if (edid == NULL || edid->extensions == 0)
11286 /* Find CEA extension */
11287 for (i = 0; i < edid->extensions; i++) {
11288 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11289 if (edid_ext[0] == CEA_EXT)
11293 if (i == edid->extensions)
11296 /*----- cea_db_offsets() -----*/
11297 if (edid_ext[0] != CEA_EXT)
11300 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11302 return valid_vsdb_found ? i : -ENODEV;
/*
 * amdgpu_dm_update_freesync_caps() - derive FreeSync/VRR capability for
 * @connector from its EDID and publish it.
 *
 * DP/eDP sinks: if the sink ignores MSA timing parameters, scan the four
 * EDID detailed-timing descriptors for a continuous-frequency monitor
 * range and record min/max vfreq and pixel clock.  HDMI sinks: parse the
 * AMD VSDB from the CEA extension block instead.  A sink is considered
 * freesync-capable when its refresh range spans more than 10 Hz.  With a
 * NULL EDID or no sink, all cached range values are cleared.  The result
 * is stored in the DM connector state and mirrored to the DRM
 * "vrr_capable" property.
 */
11305 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11309 struct detailed_timing *timing;
11310 struct detailed_non_pixel *data;
11311 struct detailed_data_monitor_range *range;
11312 struct amdgpu_dm_connector *amdgpu_dm_connector =
11313 to_amdgpu_dm_connector(connector);
11314 struct dm_connector_state *dm_con_state = NULL;
11315 struct dc_sink *sink;
11317 struct drm_device *dev = connector->dev;
11318 struct amdgpu_device *adev = drm_to_adev(dev);
11319 bool freesync_capable = false;
11320 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11322 if (!connector->state) {
11323 DRM_ERROR("%s - Connector has no state", __func__);
/* Fall back to the emulated sink when no physical sink is attached. */
11327 sink = amdgpu_dm_connector->dc_sink ?
11328 amdgpu_dm_connector->dc_sink :
11329 amdgpu_dm_connector->dc_em_sink;
11331 if (!edid || !sink) {
11332 dm_con_state = to_dm_connector_state(connector->state);
11334 amdgpu_dm_connector->min_vfreq = 0;
11335 amdgpu_dm_connector->max_vfreq = 0;
11336 amdgpu_dm_connector->pixel_clock_mhz = 0;
11337 connector->display_info.monitor_range.min_vfreq = 0;
11338 connector->display_info.monitor_range.max_vfreq = 0;
11339 freesync_capable = false;
11344 dm_con_state = to_dm_connector_state(connector->state);
11346 if (!adev->dm.freesync_module)
11350 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11351 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11352 bool edid_check_required = false;
11355 edid_check_required = is_dp_capable_without_timing_msa(
11357 amdgpu_dm_connector);
11360 if (edid_check_required == true && (edid->version > 1 ||
11361 (edid->version == 1 && edid->revision > 1))) {
11362 for (i = 0; i < 4; i++) {
11364 timing = &edid->detailed_timings[i];
11365 data = &timing->data.other_data;
11366 range = &data->data.range;
11368 * Check if monitor has continuous frequency mode
11370 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11373 * Check for flag range limits only. If flag == 1 then
11374 * no additional timing information provided.
11375 * Default GTF, GTF Secondary curve and CVT are not
11378 if (range->flags != 1)
11381 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11382 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11383 amdgpu_dm_connector->pixel_clock_mhz =
11384 range->pixel_clock_mhz * 10;
11386 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11387 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
/* Require a usable VRR window (> 10 Hz span) to call it freesync. */
11392 if (amdgpu_dm_connector->max_vfreq -
11393 amdgpu_dm_connector->min_vfreq > 10) {
11395 freesync_capable = true;
11398 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11399 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11400 if (i >= 0 && vsdb_info.freesync_supported) {
11401 timing = &edid->detailed_timings[i];
11402 data = &timing->data.other_data;
11404 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11405 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11406 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11407 freesync_capable = true;
11409 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11410 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11416 dm_con_state->freesync_capable = freesync_capable;
11418 if (connector->vrr_capable_property)
11419 drm_connector_set_vrr_capable_property(connector,
11423 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11425 struct amdgpu_device *adev = drm_to_adev(dev);
11426 struct dc *dc = adev->dm.dc;
11429 mutex_lock(&adev->dm.dc_lock);
11430 if (dc->current_state) {
11431 for (i = 0; i < dc->current_state->stream_count; ++i)
11432 dc->current_state->streams[i]
11433 ->triggered_crtc_reset.enabled =
11434 adev->dm.force_timing_sync;
11436 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11437 dc_trigger_sync(dc, dc->current_state);
11439 mutex_unlock(&adev->dm.dc_lock);
11442 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11443 uint32_t value, const char *func_name)
11445 #ifdef DM_CHECK_ADDR_0
11446 if (address == 0) {
11447 DC_ERR("invalid register write. address = 0");
11451 cgs_write_register(ctx->cgs_device, address, value);
11452 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11455 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11456 const char *func_name)
11459 #ifdef DM_CHECK_ADDR_0
11460 if (address == 0) {
11461 DC_ERR("invalid register read; address = 0\n");
11466 if (ctx->dmub_srv &&
11467 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11468 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11473 value = cgs_read_register(ctx->cgs_device, address);
11475 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11480 int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11481 uint8_t status_type, uint32_t *operation_result)
11483 struct amdgpu_device *adev = ctx->driver_context;
11484 int return_status = -1;
11485 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11488 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11489 return_status = p_notify->aux_reply.length;
11490 *operation_result = p_notify->result;
11491 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11492 *operation_result = AUX_RET_ERROR_TIMEOUT;
11493 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11494 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11496 *operation_result = AUX_RET_ERROR_UNKNOWN;
11499 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11501 *operation_result = p_notify->sc_status;
11503 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11507 return return_status;
11510 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11511 unsigned int link_index, void *cmd_payload, void *operation_result)
11513 struct amdgpu_device *adev = ctx->driver_context;
11517 dc_process_dmub_aux_transfer_async(ctx->dc,
11518 link_index, (struct aux_payload *)cmd_payload);
11519 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11520 (struct set_config_cmd_payload *)cmd_payload,
11521 adev->dm.dmub_notify)) {
11522 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11523 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11524 (uint32_t *)operation_result);
11527 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11529 DRM_ERROR("wait_for_completion_timeout timeout!");
11530 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11531 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11532 (uint32_t *)operation_result);
11536 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11537 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11539 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11540 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11541 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11542 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11543 adev->dm.dmub_notify->aux_reply.length);
11548 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11549 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11550 (uint32_t *)operation_result);