2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
52 #include "amdgpu_pm.h"
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
61 #include "amdgpu_dm_psr.h"
63 #include "ivsrcid/ivsrcid_vislands30.h"
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
92 #include "soc15_common.h"
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
/* DMUB (Display Micro-Controller Unit B) firmware images, one per ASIC.
 * MODULE_FIRMWARE() records the blob name so packaging/initramfs tooling
 * bundles it; the blob is fetched via request_firmware() at DM init. */
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

/* Legacy DMCU firmware for ASICs that predate DMUB. */
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/*
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
/*
 * get_subconnector_type() - Map the DC-reported DP dongle type to the
 * DRM subconnector enum exposed to userspace.
 */
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
switch (link->dpcd_caps.dongle_type) {
case DISPLAY_DONGLE_NONE:
return DRM_MODE_SUBCONNECTOR_Native;
case DISPLAY_DONGLE_DP_VGA_CONVERTER:
return DRM_MODE_SUBCONNECTOR_VGA;
case DISPLAY_DONGLE_DP_DVI_CONVERTER:
case DISPLAY_DONGLE_DP_DVI_DONGLE:
return DRM_MODE_SUBCONNECTOR_DVID;
case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
case DISPLAY_DONGLE_DP_HDMI_DONGLE:
return DRM_MODE_SUBCONNECTOR_HDMIA;
case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
/* NOTE(review): default/fallthrough lines appear elided in this chunk;
 * mismatched dongles presumably fall through to Unknown — confirm. */
return DRM_MODE_SUBCONNECTOR_Unknown;
/*
 * update_subconnector_property() - Push the dongle-derived subconnector
 * type into the connector's DRM dp_subconnector property.  Only done for
 * DisplayPort connectors; Unknown is reported when no sink is attached.
 */
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
struct dc_link *link = aconnector->dc_link;
struct drm_connector *connector = &aconnector->base;
enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
/* NOTE(review): the early-return body for non-DP connectors is elided
 * in this chunk. */

/* Only query the dongle when a sink is actually present. */
if (aconnector->dc_sink)
subconnector = get_subconnector_type(link);

drm_object_property_set_value(&connector->base,
connector->dev->mode_config.dp_subconnector_property,
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);

/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *amdgpu_dm_connector,
struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
struct amdgpu_encoder *aencoder,
uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/* NOTE(review): the return-type line of this prototype is elided here. */
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter
 *
 * Get counter for number of vertical blanks
 *
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
/* Bounds-check the CRTC index (error path elided in this chunk). */
if (crtc >= adev->mode_info.num_crtc)

struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

/* No DC stream bound to this CRTC — nothing to query. */
if (acrtc->dm_irq_params.stream == NULL) {
DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",

return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
/*
 * dm_crtc_get_scanoutpos() - Read back the current scanout position and
 * vblank interval for @crtc, packed register-style:
 *   *position = v_position | (h_position << 16)
 *   *vbl      = v_blank_start | (v_blank_end << 16)
 */
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
uint32_t v_blank_start, v_blank_end, h_position, v_position;

/* Bounds-check the CRTC index (error path elided in this chunk). */
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))

struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

if (acrtc->dm_irq_params.stream == NULL) {
DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",

/*
 * TODO rework base driver to use values directly.
 * for now parse it back into reg-format
 */
dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,

*position = v_position | (h_position << 16);
*vbl = v_blank_start | (v_blank_end << 16);
/* amd_ip_funcs idle/soft-reset callbacks for the DM IP block
 * (bodies elided in this chunk; upstream these are trivial stubs). */
static bool dm_is_idle(void *handle)

static int dm_wait_for_idle(void *handle)

static bool dm_check_soft_reset(void *handle)

static int dm_soft_reset(void *handle)
/*
 * get_crtc_by_otg_inst() - Find the amdgpu_crtc whose OTG (output timing
 * generator) instance matches the given one.  An instance of -1 triggers
 * a WARN and falls back to CRTC 0.
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;

if (WARN_ON(otg_inst == -1))
return adev->mode_info.crtcs[0];

list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);

/* Match on the CRTC's OTG instance (return elided in this chunk). */
if (amdgpu_crtc->otg_inst == otg_inst)
/* True when VRR (freesync) is active on this CRTC — either variable or
 * fixed refresh — judged from the IRQ-context copy of the config. */
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
return acrtc->dm_irq_params.freesync_config.state ==
VRR_STATE_ACTIVE_VARIABLE ||
acrtc->dm_irq_params.freesync_config.state ==
VRR_STATE_ACTIVE_FIXED;

/* Same VRR-active check, but against the atomic dm_crtc_state. */
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;

/* Decide whether DC timing (vmin/vmax) needs adjusting across a state
 * transition: entering fixed-rate VRR, or VRR toggling on/off
 * (branch bodies elided in this chunk). */
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
struct amdgpu_crtc *amdgpu_crtc;
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct drm_pending_vblank_event *e;
uint32_t vpos, hpos, v_blank_start, v_blank_end;

/* Translate the IRQ source back to the OTG/CRTC that fired. */
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

/* IRQ could occur when in initial stage */
/* TODO work and BO cleanup */
if (amdgpu_crtc == NULL) {
DC_LOG_PFLIP("CRTC is null, returning.\n");

spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

/* Spurious interrupt: no flip is pending on this CRTC — bail out. */
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

/* page flip completed. */
e = amdgpu_crtc->event;
amdgpu_crtc->event = NULL;

vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

/* Fixed refresh rate, or VRR scanout position outside front-porch? */
!dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
&v_blank_end, &hpos, &vpos) ||
(vpos < v_blank_start)) {
/* Update to correct count and vblank timestamp if racing with
 * vblank irq. This also updates to the correct vblank timestamp
 * even in VRR mode, as scanout is past the front-porch atm.
 */
drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

/* Wake up userspace by sending the pageflip event with proper
 * count and timestamp of vblank of flip completion.
 */
drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

/* Event sent, so done with vblank for this flip */
drm_crtc_vblank_put(&amdgpu_crtc->base);

/* VRR active and inside front-porch: vblank count and
 * timestamp for pageflip event will only be up to date after
 * drm_crtc_handle_vblank() has been executed from late vblank
 * irq handler after start of back-porch (vline 0). We queue the
 * pageflip event for send-out by drm_crtc_handle_vblank() with
 * updated timestamp and count, once it runs after us.
 *
 * We need to open-code this instead of using the helper
 * drm_crtc_arm_vblank_event(), as that helper would
 * call drm_crtc_accurate_vblank_count(), which we must
 * not call in VRR mode while we are in front-porch!
 */

/* sequence will be replaced by real count during send-out. */
e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
e->pipe = amdgpu_crtc->crtc_id;

list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);

/* Keep track of vblank of this flip for flip throttling. We use the
 * cooked hw counter, as that one incremented at start of this vblank
 * of pageflip completion, so last_flip_vblank is the forbidden count
 * for queueing new pageflips if vsync + VRR is enabled.
 */
amdgpu_crtc->dm_irq_params.last_flip_vblank =
amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
amdgpu_crtc->crtc_id, amdgpu_crtc,
vrr_active, (int) !e);
/*
 * dm_vupdate_high_irq() - VUPDATE interrupt handler.  Tracks per-frame
 * duration for the refresh-rate tracepoint and, when VRR is active,
 * performs the deferred vblank handling plus BTR (below-the-range)
 * processing on pre-DCE12 (pre-AI family) ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
struct drm_device *drm_dev;
struct drm_vblank_crtc *vblank;
ktime_t frame_duration_ns, previous_timestamp;

acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
drm_dev = acrtc->base.dev;
vblank = &drm_dev->vblank[acrtc->base.index];
previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
/* Frame duration = time between this vblank and the previous one. */
frame_duration_ns = vblank->time - previous_timestamp;

if (frame_duration_ns > 0) {
trace_amdgpu_refresh_rate_track(acrtc->base.index,
ktime_divns(NSEC_PER_SEC, frame_duration_ns));
atomic64_set(&irq_params->previous_timestamp, vblank->time);

DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",

/* Core vblank handling is done here after end of front-porch in
 * vrr mode, as vblank timestamping will give valid results
 * while now done after front-porch. This will also deliver
 * page-flip completion events that have been queued to us
 * if a pageflip happened inside front-porch.
 */
drm_crtc_handle_vblank(&acrtc->base);

/* BTR processing for pre-DCE12 ASICs */
if (acrtc->dm_irq_params.stream &&
adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);

dc_stream_adjust_vmin_vmax(
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params.adjust);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 */
static void dm_crtc_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;

acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
vrr_active, acrtc->dm_irq_params.active_planes);

/*
 * Core vblank handling at start of front-porch is only possible
 * in non-vrr mode, as only there vblank timestamping will give
 * valid results while done in front-porch. Otherwise defer it
 * to dm_vupdate_high_irq after end of front-porch.
 */
drm_crtc_handle_vblank(&acrtc->base);

/*
 * Following stuff must happen at start of vblank, for crc
 * computation and below-the-range btr support in vrr mode.
 */
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

/* BTR updates need to happen before VUPDATE on Vega and above. */
if (adev->family < AMDGPU_FAMILY_AI)

spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

/* Variable-rate VRR: let the freesync module update BTR state and
 * apply the resulting vmin/vmax adjustment to the stream. */
if (acrtc->dm_irq_params.stream &&
acrtc->dm_irq_params.vrr_params.supported &&
acrtc->dm_irq_params.freesync_config.state ==
VRR_STATE_ACTIVE_VARIABLE) {
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);

dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params.adjust);

/*
 * If there aren't any active_planes then DCH HUBP may be clock-gated.
 * In that case, pageflip completion interrupts won't fire and pageflip
 * completion events won't get delivered. Prevent this by sending
 * pending pageflip events from here if a flip is still pending.
 *
 * If any planes are enabled, use dm_pflip_high_irq() instead, to
 * avoid race conditions between flip programming and completion,
 * which could cause too early flip completion events.
 */
if (adev->family >= AMDGPU_FAMILY_RV &&
acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
acrtc->dm_irq_params.active_planes == 0) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);

drm_crtc_vblank_put(&acrtc->base);

acrtc->pflip_status = AMDGPU_FLIP_NONE;

spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;

acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

/* Delegate the secure-display CRC window handling to the CRC code. */
amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
621 #define DMUB_TRACE_MAX_READ 64
623 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
624 * @interrupt_params: used for determining the Outbox instance
626 * Handles the Outbox Interrupt
629 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
631 struct dmub_notification notify;
632 struct common_irq_params *irq_params = interrupt_params;
633 struct amdgpu_device *adev = irq_params->adev;
634 struct amdgpu_display_manager *dm = &adev->dm;
635 struct dmcub_trace_buf_entry entry = { 0 };
638 if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
641 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
642 } while (notify.pending_notification);
644 if (adev->dm.dmub_notify)
645 memcpy(adev->dm.dmub_notify, ¬ify, sizeof(struct dmub_notification));
646 if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
647 complete(&adev->dm.dmub_aux_transfer_done);
648 // TODO : HPD Implementation
651 DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
657 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
658 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
659 entry.param0, entry.param1);
661 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
662 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
668 } while (count <= DMUB_TRACE_MAX_READ);
670 ASSERT(count <= DMUB_TRACE_MAX_READ);
/* amd_ip_funcs clock/power gating callbacks for the DM IP block
 * (bodies elided in this chunk). */
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)

static int dm_set_powergating_state(void *handle,
enum amd_powergating_state state)

/* Prototypes of private functions */
static int dm_early_init(void* handle);
/* Allocate memory for FBC compressed data  */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_compressor_info *compressor = &adev->dm.compressor;
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
struct drm_display_mode *mode;
unsigned long max_size = 0;

/* ASIC has no FBC compressor — nothing to allocate. */
if (adev->dm.dc->fbc_compressor == NULL)

/* Only the eDP link gets an FBC buffer. */
if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)

/* Already allocated on a previous call. */
if (compressor->bo_ptr)

/* Size the buffer for the largest mode the panel advertises. */
list_for_each_entry(mode, &connector->modes, head) {
if (max_size < mode->htotal * mode->vtotal)
max_size = mode->htotal * mode->vtotal;

/* 4 bytes per pixel, page-aligned, GTT-backed kernel BO. */
int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
&compressor->gpu_addr, &compressor->cpu_addr);

DRM_ERROR("DM: Failed to initialize FBC\n");

adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
/*
 * amdgpu_dm_audio_component_get_eld() - DRM audio component callback.
 * Finds the connector whose audio instance matches @port, copies its
 * ELD (EDID-Like Data) into @buf and reports the pin state via @enabled.
 */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
int pipe, bool *enabled,
unsigned char *buf, int max_bytes)
struct drm_device *dev = dev_get_drvdata(kdev);
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct amdgpu_dm_connector *aconnector;

mutex_lock(&adev->dm.audio_lock);

drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
aconnector = to_amdgpu_dm_connector(connector);
/* Skip connectors bound to a different audio instance. */
if (aconnector->audio_inst != port)

/* Copy at most max_bytes of the ELD. */
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));

drm_connector_list_iter_end(&conn_iter);

mutex_unlock(&adev->dm.audio_lock);

DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
/* Ops table handed to the HDA driver via the audio component. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
.get_eld = amdgpu_dm_audio_component_get_eld,

/* Component-framework bind: publish our ops on the shared
 * drm_audio_component and cache it for later ELD notifications. */
static int amdgpu_dm_audio_component_bind(struct device *kdev,
struct device *hda_kdev, void *data)
struct drm_device *dev = dev_get_drvdata(kdev);
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_audio_component *acomp = data;

acomp->ops = &amdgpu_dm_audio_component_ops;
adev->dm.audio_component = acomp;

/* Component-framework unbind: drop the cached audio component. */
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
struct device *hda_kdev, void *data)
struct drm_device *dev = dev_get_drvdata(kdev);
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_audio_component *acomp = data;

adev->dm.audio_component = NULL;

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
.bind = amdgpu_dm_audio_component_bind,
.unbind = amdgpu_dm_audio_component_unbind,
/*
 * amdgpu_dm_audio_init() - Populate the audio pin table from DC's
 * resource pool and register the audio component with the component
 * framework so the HDA driver can bind to it.
 */
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.enabled = true;

adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

/* Start every pin as unconfigured/disconnected; real values arrive
 * later through ELD notifications. */
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
adev->mode_info.audio.pin[i].channels = -1;
adev->mode_info.audio.pin[i].rate = -1;
adev->mode_info.audio.pin[i].bits_per_sample = -1;
adev->mode_info.audio.pin[i].status_bits = 0;
adev->mode_info.audio.pin[i].category_code = 0;
adev->mode_info.audio.pin[i].connected = false;
adev->mode_info.audio.pin[i].id =
adev->dm.dc->res_pool->audios[i]->inst;
adev->mode_info.audio.pin[i].offset = 0;

ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);

adev->dm.audio_registered = true;
/* Undo amdgpu_dm_audio_init(): unregister the component and mark
 * audio disabled. */
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
if (!adev->mode_info.audio.enabled)

if (adev->dm.audio_registered) {
component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
adev->dm.audio_registered = false;

/* TODO: Disable audio? */

adev->mode_info.audio.enabled = false;

/* Notify the bound HDA component that the ELD for @pin changed. */
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
struct drm_audio_component *acomp = adev->dm.audio_component;

if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
/*
 * dm_dmub_hw_init() - Copy the DMUB firmware sections into their
 * framebuffer windows and bring up the DMUB service hardware.
 *
 * Returns 0 on success or when DMUB is simply absent/unsupported on
 * this ASIC; error paths are partly elided in this chunk.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
const struct dmcub_firmware_header_v1_0 *hdr;
struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
const struct firmware *dmub_fw = adev->dm.dmub_fw;
struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
struct abm *abm = adev->dm.dc->res_pool->abm;
struct dmub_srv_hw_params hw_params;
enum dmub_status status;
const unsigned char *fw_inst_const, *fw_bss_data;
uint32_t i, fw_inst_const_size, fw_bss_data_size;

/* DMUB isn't supported on the ASIC. */

DRM_ERROR("No framebuffer info for DMUB service.\n");

/* Firmware required for DMUB support. */
DRM_ERROR("No firmware provided for DMUB.\n");

status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error checking HW support for DMUB: %d\n", status);

if (!has_hw_support) {
DRM_INFO("DMUB unsupported on ASIC\n");

hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

/* Locate the inst-const and bss/data sections inside the blob. */
fw_inst_const = dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +

fw_bss_data = dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
le32_to_cpu(hdr->inst_const_bytes);

/* Copy firmware and bios info into FB memory. */
fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
 * amdgpu_ucode_init_single_fw will load dmub firmware
 * fw_inst_const part to cw0; otherwise, the firmware back door load
 * will be done by dm_dmub_hw_init
 */
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,

if (fw_bss_data_size)
memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
fw_bss_data, fw_bss_data_size);

/* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,

/* Reset regions that need to be reset. */
memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

/* Initialize hardware. */
memset(&hw_params, 0, sizeof(hw_params));
hw_params.fb_base = adev->gmc.fb_start;
hw_params.fb_offset = adev->gmc.aper_base;

/* backdoor load firmware and trigger dmub running */
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
hw_params.load_inst_const = true;

hw_params.psp_version = dmcu->psp_version;

for (i = 0; i < fb_info->num_fb; ++i)
hw_params.fb[i] = &fb_info->fb[i];

status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error initializing DMUB HW: %d\n", status);

/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

/* Init DMCU and ABM if available. */
dmcu->funcs->dmcu_init(dmcu);
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

/* Create the DC-side DMUB server wrapper exactly once. */
if (!adev->dm.dc->ctx->dmub_srv)
adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
if (!adev->dm.dc->ctx->dmub_srv) {
DRM_ERROR("Couldn't allocate DC DMUB server!\n");

DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
adev->dm.dmcub_fw_version);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * mmhub_read_system_context() - Fill @pa_config with the GPU physical
 * address layout (system aperture, AGP window, GART page tables) so DC
 * can program the DCN-side address-space registers.
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
uint32_t logical_addr_low;
uint32_t logical_addr_high;
uint32_t agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

/* System aperture bounds are kept in 256KB (>> 18) units. */
logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
 * Raven2 has a HW issue that it is unable to use the vram which
 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
 * workaround that increase system aperture high address (add 1)
 * to get rid of the VM fault and hardware hang.
 */
logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
/* NOTE(review): this is the non-Raven2 branch; the `else` line is
 * elided in this chunk. */
logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

/* AGP window bounds in 16MB (>> 24) units. */
agp_bot = adev->gmc.agp_start >> 24;
agp_top = adev->gmc.agp_end >> 24;

/* GART page-table addresses split into 4-bit high / 32-bit low parts. */
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
page_table_base.low_part = lower_32_bits(pt_base);

pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

pa_config->system_aperture.fb_base = adev->gmc.fb_start;
pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
pa_config->system_aperture.fb_top = adev->gmc.fb_end;

pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

pa_config->is_hvm_enabled = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * vblank_control_worker() - Process-context worker for vblank IRQ
 * enable/disable.  Maintains the active-vblank-IRQ refcount, allows DC
 * idle optimizations (MALL) only when no CRTC needs vblank IRQs, and
 * enters/exits PSR according to the OS's vblank requirements.
 */
static void vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;

mutex_lock(&dm->dc_lock);

if (vblank_work->enable)
dm->active_vblank_irq_count++;
else if(dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;

/* Idle optimizations are only safe with zero active vblank IRQs. */
dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

/* Control PSR based on vblank requirements from OS */
if (vblank_work->stream && vblank_work->stream->link) {
if (vblank_work->enable) {
/* Vblank needed: make sure PSR is not active. */
if (vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
!vblank_work->stream->link->psr_settings.psr_allow_active &&
vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
amdgpu_dm_psr_enable(vblank_work->stream);

mutex_unlock(&dm->dc_lock);

/* Drop the reference taken when this work item was queued. */
dc_stream_release(vblank_work->stream);
/*
 * amdgpu_dm_init - create and initialize the display manager (DM).
 *
 * Sets up locks and IRQ support, builds the dc_init_data describing the ASIC,
 * creates the Display Core (DC) instance, applies feature/debug mask
 * overrides, initializes DMUB hardware, and then brings up the supporting
 * modules (freesync, color management, vblank/HDCP workqueues, DMUB
 * notifications) and the DRM device state.  On failure it falls through to
 * amdgpu_dm_fini() for teardown.
 */
1084 static int amdgpu_dm_init(struct amdgpu_device *adev)
1086 struct dc_init_data init_data;
1087 #ifdef CONFIG_DRM_AMD_DC_HDCP
1088 struct dc_callback_init init_params;
1092 adev->dm.ddev = adev_to_drm(adev);
1093 adev->dm.adev = adev;
1095 /* Zero all the fields */
1096 memset(&init_data, 0, sizeof(init_data));
1097 #ifdef CONFIG_DRM_AMD_DC_HDCP
1098 memset(&init_params, 0, sizeof(init_params));
1101 mutex_init(&adev->dm.dc_lock);
1102 mutex_init(&adev->dm.audio_lock);
1103 #if defined(CONFIG_DRM_AMD_DC_DCN)
1104 spin_lock_init(&adev->dm.vblank_lock);
1107 if(amdgpu_dm_irq_init(adev)) {
1108 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
/* Describe the ASIC to DC: family, PCI revision, internal rev, VRAM. */
1112 init_data.asic_id.chip_family = adev->family;
1114 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1115 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1117 init_data.asic_id.vram_width = adev->gmc.vram_width;
1118 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1119 init_data.asic_id.atombios_base_address =
1120 adev->mode_info.atom_context->bios;
1122 init_data.driver = adev;
1124 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1126 if (!adev->dm.cgs_device) {
1127 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1131 init_data.cgs_device = adev->dm.cgs_device;
1133 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
/* Per-ASIC DC flag tweaks (GPU VM support, DMCU disable, etc.). */
1135 switch (adev->asic_type) {
1140 init_data.flags.gpu_vm_support = true;
1141 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1142 init_data.flags.disable_dmcu = true;
1145 case CHIP_YELLOW_CARP:
1146 init_data.flags.gpu_vm_support = true;
/* Apply user-controlled feature-mask overrides to the DC creation flags. */
1152 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1153 init_data.flags.fbc_support = true;
1155 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1156 init_data.flags.multi_mon_pp_mclk_switch = true;
1158 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1159 init_data.flags.disable_fractional_pwm = true;
1161 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1162 init_data.flags.edp_no_power_sequencing = true;
1164 init_data.flags.power_down_display_on_boot = true;
1166 INIT_LIST_HEAD(&adev->dm.da_list);
1167 /* Display Core create. */
1168 adev->dm.dc = dc_create(&init_data);
1171 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1173 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
/* Honor debug-mask overrides on the freshly created DC instance. */
1177 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1178 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1179 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1182 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1183 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1185 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1186 adev->dm.dc->debug.disable_stutter = true;
1188 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1189 adev->dm.dc->debug.disable_dsc = true;
1191 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1192 adev->dm.dc->debug.disable_clock_gate = true;
/* Bring up the DMUB firmware/hardware interface before DC hardware init. */
1194 r = dm_dmub_hw_init(adev);
1196 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1200 dc_hardware_init(adev->dm.dc);
1202 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* APUs need DC to know the system aperture / GART layout for VM support. */
1203 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1204 struct dc_phy_addr_space_config pa_config;
1206 mmhub_read_system_context(adev, &pa_config);
1208 // Call the DC init_memory func
1209 dc_setup_system_context(adev->dm.dc, &pa_config);
1213 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1214 if (!adev->dm.freesync_module) {
1216 "amdgpu: failed to initialize freesync_module.\n");
1218 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1219 adev->dm.freesync_module);
1221 amdgpu_dm_init_color_mod();
1223 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Single-threaded workqueue serializing vblank_control_worker items. */
1224 if (adev->dm.dc->caps.max_links > 0) {
1225 adev->dm.vblank_control_workqueue =
1226 create_singlethread_workqueue("dm_vblank_control_workqueue");
1227 if (!adev->dm.vblank_control_workqueue)
1228 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1232 #ifdef CONFIG_DRM_AMD_DC_HDCP
1233 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1234 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1236 if (!adev->dm.hdcp_workqueue)
1237 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1239 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1241 dc_init_callbacks(adev->dm.dc, &init_params);
1244 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1245 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
/* Set up the DMUB->driver notification path (AUX transfers, outbox IRQ). */
1247 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1248 init_completion(&adev->dm.dmub_aux_transfer_done);
1249 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1250 if (!adev->dm.dmub_notify) {
1251 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1254 amdgpu_dm_outbox_init(adev);
1257 if (amdgpu_dm_initialize_drm_device(adev)) {
1259 "amdgpu: failed to initialize sw for display support.\n");
1263 /* create fake encoders for MST */
1264 dm_dp_create_fake_mst_encoders(adev);
1266 /* TODO: Add_display_info? */
1268 /* TODO use dynamic cursor width */
1269 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1270 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1272 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1274 "amdgpu: failed to initialize sw for display support.\n");
1279 DRM_DEBUG_DRIVER("KMS initialized.\n");
/* Error path: unwind everything initialized so far. */
1283 amdgpu_dm_fini(adev);
/*
 * amdgpu_dm_early_fini - early teardown hook for the DM IP block.
 * Tears down the audio component before the main fini runs.
 */
1288 static int amdgpu_dm_early_fini(void *handle)
1290 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1292 amdgpu_dm_audio_fini(adev);
/*
 * amdgpu_dm_fini - tear down the display manager.
 *
 * Reverse of amdgpu_dm_init(): destroys workqueues, MST fake encoders, the
 * DRM device state, HDCP/secure-display workers, DMUB resources, DC itself,
 * and finally the supporting modules and locks.  Each pointer is NULLed
 * after release so the function is safe against partially-initialized state.
 */
1297 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1301 #if defined(CONFIG_DRM_AMD_DC_DCN)
1302 if (adev->dm.vblank_control_workqueue) {
1303 destroy_workqueue(adev->dm.vblank_control_workqueue);
1304 adev->dm.vblank_control_workqueue = NULL;
/* Clean up the fake MST encoders created in amdgpu_dm_init(). */
1308 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1309 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1312 amdgpu_dm_destroy_drm_device(&adev->dm);
1314 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Flush any pending secure-display TA work before freeing its context. */
1315 if (adev->dm.crc_rd_wrk) {
1316 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1317 kfree(adev->dm.crc_rd_wrk);
1318 adev->dm.crc_rd_wrk = NULL;
1321 #ifdef CONFIG_DRM_AMD_DC_HDCP
1322 if (adev->dm.hdcp_workqueue) {
1323 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1324 adev->dm.hdcp_workqueue = NULL;
1328 dc_deinit_callbacks(adev->dm.dc)
1331 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1333 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1334 kfree(adev->dm.dmub_notify);
1335 adev->dm.dmub_notify = NULL;
1338 if (adev->dm.dmub_bo)
1339 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1340 &adev->dm.dmub_bo_gpu_addr,
1341 &adev->dm.dmub_bo_cpu_addr);
1343 /* DC Destroy TODO: Replace destroy DAL */
1345 dc_destroy(&adev->dm.dc);
1347 * TODO: pageflip, vblank interrupt
1349 * amdgpu_dm_irq_fini(adev);
1352 if (adev->dm.cgs_device) {
1353 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1354 adev->dm.cgs_device = NULL;
1356 if (adev->dm.freesync_module) {
1357 mod_freesync_destroy(adev->dm.freesync_module);
1358 adev->dm.freesync_module = NULL;
1361 mutex_destroy(&adev->dm.audio_lock);
1362 mutex_destroy(&adev->dm.dc_lock);
/*
 * load_dmcu_fw - request and register DMCU firmware for PSP loading.
 *
 * Selects the DMCU firmware image by ASIC type (many ASICs need none and
 * simply return early), requests and validates it, then registers its ERAM
 * and interrupt-vector sections in adev->firmware so PSP can load them.
 * A missing DMCU image is not an error: DMCU firmware is optional.
 */
1367 static int load_dmcu_fw(struct amdgpu_device *adev)
1369 const char *fw_name_dmcu = NULL;
1371 const struct dmcu_firmware_header_v1_0 *hdr;
1373 switch(adev->asic_type) {
1374 #if defined(CONFIG_DRM_AMD_DC_SI)
1389 case CHIP_POLARIS11:
1390 case CHIP_POLARIS10:
1391 case CHIP_POLARIS12:
1399 case CHIP_SIENNA_CICHLID:
1400 case CHIP_NAVY_FLOUNDER:
1401 case CHIP_DIMGREY_CAVEFISH:
1402 case CHIP_BEIGE_GOBY:
1404 case CHIP_YELLOW_CARP:
1407 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
/* Raven family: Picasso and Raven2 revisions share the Raven DMCU image. */
1410 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1411 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1412 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1413 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1418 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
/* DMCU firmware can only be loaded through PSP. */
1422 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1423 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1427 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1429 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1430 DRM_DEBUG_KMS("dm: DMCU firmware not found\n")
1431 adev->dm.fw_dmcu = NULL;
1435 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1440 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1442 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1444 release_firmware(adev->dm.fw_dmcu);
1445 adev->dm.fw_dmcu = NULL;
/* Register the ERAM section (ucode minus interrupt vectors) with PSP. */
1449 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1450 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1451 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1452 adev->firmware.fw_size +=
1453 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
/* Register the interrupt-vector section separately. */
1455 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1456 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1457 adev->firmware.fw_size +=
1458 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1460 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1462 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1467 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1469 struct amdgpu_device *adev = ctx;
1471 return dm_read_reg(adev->dm.dc->ctx, address);
1474 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1477 struct amdgpu_device *adev = ctx;
1479 return dm_write_reg(adev->dm.dc->ctx, address, value);
/*
 * dm_dmub_sw_init - software-side setup of the DMUB firmware service.
 *
 * Selects the DMUB firmware image and ASIC enum by chip type (returning
 * early for ASICs without DMUB), requests and validates the firmware,
 * optionally registers it for PSP loading, creates the dmub_srv instance,
 * computes the memory region layout the service needs, backs it with a
 * VRAM buffer object, and fills in the framebuffer info used at HW init.
 */
1482 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1484 struct dmub_srv_create_params create_params;
1485 struct dmub_srv_region_params region_params;
1486 struct dmub_srv_region_info region_info;
1487 struct dmub_srv_fb_params fb_params;
1488 struct dmub_srv_fb_info *fb_info;
1489 struct dmub_srv *dmub_srv;
1490 const struct dmcub_firmware_header_v1_0 *hdr;
1491 const char *fw_name_dmub;
1492 enum dmub_asic dmub_asic;
1493 enum dmub_status status;
/* Map chip type to DMUB ASIC enum and firmware file name. */
1496 switch (adev->asic_type) {
1498 dmub_asic = DMUB_ASIC_DCN21;
1499 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1500 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1501 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1503 case CHIP_SIENNA_CICHLID:
1504 dmub_asic = DMUB_ASIC_DCN30;
1505 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1507 case CHIP_NAVY_FLOUNDER:
1508 dmub_asic = DMUB_ASIC_DCN30;
1509 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1512 dmub_asic = DMUB_ASIC_DCN301;
1513 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1515 case CHIP_DIMGREY_CAVEFISH:
1516 dmub_asic = DMUB_ASIC_DCN302;
1517 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1519 case CHIP_BEIGE_GOBY:
1520 dmub_asic = DMUB_ASIC_DCN303;
1521 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1523 case CHIP_YELLOW_CARP:
1524 dmub_asic = DMUB_ASIC_DCN31;
1525 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1529 /* ASIC doesn't support DMUB. */
1533 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1535 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1539 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1541 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1545 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1546 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
/* When PSP does the loading, register the DMCUB image with it. */
1548 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1549 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1550 AMDGPU_UCODE_ID_DMCUB;
1551 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1553 adev->firmware.fw_size +=
1554 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1556 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1557 adev->dm.dmcub_fw_version);
1561 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1562 dmub_srv = adev->dm.dmub_srv;
1565 DRM_ERROR("Failed to allocate DMUB service!\n");
/* Hook the service up to our register accessors and ASIC type. */
1569 memset(&create_params, 0, sizeof(create_params));
1570 create_params.user_ctx = adev;
1571 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1572 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1573 create_params.asic = dmub_asic;
1575 /* Create the DMUB service. */
1576 status = dmub_srv_create(dmub_srv, &create_params);
1577 if (status != DMUB_STATUS_OK) {
1578 DRM_ERROR("Error creating DMUB service: %d\n", status);
1582 /* Calculate the size of all the regions for the DMUB service. */
1583 memset(&region_params, 0, sizeof(region_params));
/* inst_const excludes the PSP header/footer wrapped around the image. */
1585 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1586 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1587 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1588 region_params.vbios_size = adev->bios_size;
1589 region_params.fw_bss_data = region_params.bss_data_size ?
1590 adev->dm.dmub_fw->data +
1591 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1592 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1593 region_params.fw_inst_const =
1594 adev->dm.dmub_fw->data +
1595 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1598 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1601 if (status != DMUB_STATUS_OK) {
1602 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1607 * Allocate a framebuffer based on the total size of all the regions.
1608 * TODO: Move this into GART.
1610 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1611 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1612 &adev->dm.dmub_bo_gpu_addr,
1613 &adev->dm.dmub_bo_cpu_addr);
1617 /* Rebase the regions on the framebuffer address. */
1618 memset(&fb_params, 0, sizeof(fb_params));
1619 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1620 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1621 fb_params.region_info = &region_info;
1623 adev->dm.dmub_fb_info =
1624 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1625 fb_info = adev->dm.dmub_fb_info;
1629 "Failed to allocate framebuffer info for DMUB service!\n");
1633 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1634 if (status != DMUB_STATUS_OK) {
1635 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
/*
 * dm_sw_init - IP-block sw_init hook: set up DMUB, then load DMCU firmware.
 */
1642 static int dm_sw_init(void *handle)
1644 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1647 r = dm_dmub_sw_init(adev);
1651 return load_dmcu_fw(adev);
/*
 * dm_sw_fini - IP-block sw_fini hook: release DMUB service state and the
 * DMUB/DMCU firmware images requested during sw_init.
 */
1654 static int dm_sw_fini(void *handle)
1656 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1658 kfree(adev->dm.dmub_fb_info);
1659 adev->dm.dmub_fb_info = NULL;
1661 if (adev->dm.dmub_srv) {
1662 dmub_srv_destroy(adev->dm.dmub_srv);
1663 adev->dm.dmub_srv = NULL;
1666 release_firmware(adev->dm.dmub_fw);
1667 adev->dm.dmub_fw = NULL;
1669 release_firmware(adev->dm.fw_dmcu);
1670 adev->dm.fw_dmcu = NULL;
/*
 * detect_mst_link_for_all_connectors - start MST topology management on
 * every connector whose DC link is an MST branch.  If enabling MST fails,
 * the link is downgraded to a single (SST) connection.
 */
1675 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1677 struct amdgpu_dm_connector *aconnector;
1678 struct drm_connector *connector;
1679 struct drm_connector_list_iter iter;
1682 drm_connector_list_iter_begin(dev, &iter);
1683 drm_for_each_connector_iter(connector, &iter) {
1684 aconnector = to_amdgpu_dm_connector(connector);
1685 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1686 aconnector->mst_mgr.aux) {
1687 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1689 aconnector->base.base.id);
1691 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1693 DRM_ERROR("DM_MST: Failed to start MST\n");
/* Fall back to treating the link as a single (non-MST) connection. */
1694 aconnector->dc_link->type =
1695 dc_connection_single;
1700 drm_connector_list_iter_end(&iter);
/*
 * dm_late_init - IP-block late_init hook.
 *
 * Builds the ABM backlight-ramping IRAM parameters (linear LUT, ramping
 * start/reduction, minimum backlight) and loads them either into the DMCU
 * or, when ABM lives in DMCUB, into each eDP link's ABM config.  Finally
 * kicks off MST detection on all connectors.
 */
1705 static int dm_late_init(void *handle)
1707 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1709 struct dmcu_iram_parameters params;
1710 unsigned int linear_lut[16];
1712 struct dmcu *dmcu = NULL;
1714 dmcu = adev->dm.dc->res_pool->dmcu;
/* 16-entry identity (linear) backlight LUT spanning 0..0xFFFF. */
1716 for (i = 0; i < 16; i++)
1717 linear_lut[i] = 0xFFFF * i / 15;
1720 params.backlight_ramping_start = 0xCCCC;
1721 params.backlight_ramping_reduction = 0xCCCCCCCC;
1722 params.backlight_lut_array_size = 16;
1723 params.backlight_lut_array = linear_lut;
1725 /* Min backlight level after ABM reduction, Don't allow below 1%
1726 * 0xFFFF x 0.01 = 0x28F
1728 params.min_abm_backlight = 0x28F;
1729 /* In the case where abm is implemented on dmcub,
1730 * dmcu object will be null.
1731 * ABM 2.4 and up are implemented on dmcub.
1734 if (!dmcu_load_iram(dmcu, params))
1736 } else if (adev->dm.dc->ctx->dmub_srv) {
1737 struct dc_link *edp_links[MAX_NUM_EDP];
1740 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1741 for (i = 0; i < edp_num; i++) {
1742 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1747 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
/*
 * s3_handle_mst - suspend or resume MST topology managers across S3.
 * @dev: DRM device
 * @suspend: true to suspend all MST managers, false to resume them
 *
 * Connectors that are MST ports (downstream of a branch) are skipped; only
 * top-level MST branch connectors own a topology manager.  If a resume
 * fails, MST is turned off for that manager and a hotplug event is sent so
 * userspace re-probes the display configuration.
 */
1750 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1752 struct amdgpu_dm_connector *aconnector;
1753 struct drm_connector *connector;
1754 struct drm_connector_list_iter iter;
1755 struct drm_dp_mst_topology_mgr *mgr;
1757 bool need_hotplug = false;
1759 drm_connector_list_iter_begin(dev, &iter);
1760 drm_for_each_connector_iter(connector, &iter) {
1761 aconnector = to_amdgpu_dm_connector(connector);
1762 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1763 aconnector->mst_port)
1766 mgr = &aconnector->mst_mgr;
1769 drm_dp_mst_topology_mgr_suspend(mgr);
1771 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1773 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1774 need_hotplug = true;
1778 drm_connector_list_iter_end(&iter);
1781 drm_kms_helper_hotplug_event(dev);
/*
 * amdgpu_dm_smu_write_watermarks_table - push DCN watermark settings to SMU
 * on resume for ASICs (Navi1x) whose Linux flow does not do it elsewhere.
 * Returns 0 when the ASIC does not need it or SW SMU is unsupported.
 */
1784 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1786 struct smu_context *smu = &adev->smu;
1789 if (!is_support_sw_smu(adev))
1792 /* This interface is for dGPU Navi1x. Linux dc-pplib interface depends
1793 * on the Windows driver's dc implementation.
1794 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1795 * should be passed to smu during boot up and resume from s3.
1796 * boot up: dc calculate dcn watermark clock settings within dc_create,
1797 * dcn20_resource_construct
1798 * then call pplib functions below to pass the settings to smu:
1799 * smu_set_watermarks_for_clock_ranges
1800 * smu_set_watermarks_table
1801 * navi10_set_watermarks_table
1802 * smu_write_watermarks_table
1804 * For Renoir, clock settings of dcn watermark are also fixed values.
1805 * dc has implemented a different flow for the Windows driver:
1806 * dc_hardware_init / dc_set_power_state
1811 * smu_set_watermarks_for_clock_ranges
1812 * renoir_set_watermarks_table
1813 * smu_write_watermarks_table
1816 * dc_hardware_init -> amdgpu_dm_init
1817 * dc_set_power_state --> dm_resume
1819 * therefore, this function applies to navi10/12/14 but not Renoir
1822 switch(adev->asic_type) {
1831 ret = smu_write_watermarks_table(smu);
1833 DRM_ERROR("Failed to update WMTABLE!\n");
1841 * dm_hw_init() - Initialize DC device
1842 * @handle: The base driver device containing the amdgpu_dm device.
1844 * Initialize the &struct amdgpu_display_manager device. This involves calling
1845 * the initializers of each DM component, then populating the struct with them.
1847 * Although the function implies hardware initialization, both hardware and
1848 * software are initialized here. Splitting them out to their relevant init
1849 * hooks is a future TODO item.
1851 * Some notable things that are initialized here:
1853 * - Display Core, both software and hardware
1854 * - DC modules that we need (freesync and color management)
1855 * - DRM software states
1856 * - Interrupt sources and handlers
1858 * - Debug FS entries, if enabled
1860 static int dm_hw_init(void *handle)
1862 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1863 /* Create DAL display manager */
1864 amdgpu_dm_init(adev);
/* Enable hotplug-detect interrupts once the DM is up. */
1865 amdgpu_dm_hpd_init(adev);
1871 * dm_hw_fini() - Teardown DC device
1872 * @handle: The base driver device containing the amdgpu_dm device.
1874 * Teardown components within &struct amdgpu_display_manager that require
1875 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1876 * were loaded. Also flush IRQ workqueues and disable them.
1878 static int dm_hw_fini(void *handle)
1880 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* Disable HPD and DM interrupts before tearing down the DM itself. */
1882 amdgpu_dm_hpd_fini(adev);
1884 amdgpu_dm_irq_fini(adev);
1885 amdgpu_dm_fini(adev);
1890 static int dm_enable_vblank(struct drm_crtc *crtc);
1891 static void dm_disable_vblank(struct drm_crtc *crtc);
/*
 * dm_gpureset_toggle_interrupts - enable/disable pflip and vblank interrupts
 * for every active stream in @state, used around GPU reset so interrupts
 * are quiesced before teardown and restored afterwards.
 */
1893 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1894 struct dc_state *state, bool enable)
1896 enum dc_irq_source irq_source;
1897 struct amdgpu_crtc *acrtc;
1901 for (i = 0; i < state->stream_count; i++) {
1902 acrtc = get_crtc_by_otg_inst(
1903 adev, state->stream_status[i].primary_otg_inst)
/* Only streams with planes have pflip/vblank interrupts to toggle. */
1905 if (acrtc && state->stream_status[i].plane_count != 0) {
1906 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1907 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1908 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1909 acrtc->crtc_id, enable ? "en" : "dis", rc);
1911 DRM_WARN("Failed to %s pflip interrupts\n",
1912 enable ? "enable" : "disable");
1915 rc = dm_enable_vblank(&acrtc->base);
1917 DRM_WARN("Failed to enable vblank interrupts\n");
1919 dm_disable_vblank(&acrtc->base);
/*
 * amdgpu_dm_commit_zero_streams - commit a DC state with no streams.
 *
 * Copies the current DC state, strips all planes and streams from the copy,
 * validates it, and commits it — effectively blanking all displays.  Used
 * on suspend during GPU reset.  Returns a dc_status error if any step fails.
 */
1927 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1929 struct dc_state *context = NULL;
1930 enum dc_status res = DC_ERROR_UNEXPECTED;
1932 struct dc_stream_state *del_streams[MAX_PIPES];
1933 int del_streams_count = 0;
1935 memset(del_streams, 0, sizeof(del_streams));
1937 context = dc_create_state(dc);
1938 if (context == NULL)
1939 goto context_alloc_fail;
1941 dc_resource_state_copy_construct_current(dc, context);
1943 /* First remove from context all streams */
1944 for (i = 0; i < context->stream_count; i++) {
1945 struct dc_stream_state *stream = context->streams[i];
1947 del_streams[del_streams_count++] = stream;
1950 /* Remove all planes for removed streams and then remove the streams */
1951 for (i = 0; i < del_streams_count; i++) {
1952 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1953 res = DC_FAIL_DETACH_SURFACES;
1957 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1963 res = dc_validate_global_state(dc, context, false);
1966 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1970 res = dc_commit_state(dc, context);
1973 dc_release_state(context);
/*
 * dm_suspend - IP-block suspend hook.
 *
 * Two paths: during GPU reset, cache the current DC state, disable stream
 * interrupts, commit a zero-stream state and suspend IRQs while holding
 * dc_lock.  For normal S3, save the atomic state via the DRM helper,
 * suspend MST managers and IRQs, then put DC into D3.
 */
1979 static int dm_suspend(void *handle)
1981 struct amdgpu_device *adev = handle;
1982 struct amdgpu_display_manager *dm = &adev->dm;
1985 if (amdgpu_in_reset(adev)) {
1986 mutex_lock(&dm->dc_lock);
1988 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* MALL/idle optimizations must be off before caching state for reset. */
1989 dc_allow_idle_optimizations(adev->dm.dc, false);
1992 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1994 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1996 amdgpu_dm_commit_zero_streams(dm->dc);
1998 amdgpu_dm_irq_suspend(adev);
2003 WARN_ON(adev->dm.cached_state);
2004 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2006 s3_handle_mst(adev_to_drm(adev), true);
2008 amdgpu_dm_irq_suspend(adev);
2010 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
/*
 * amdgpu_dm_find_first_crtc_matching_connector - return the first connector
 * in @state whose new state is bound to @crtc, or NULL if none matches.
 */
2015 static struct amdgpu_dm_connector *
2016 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2017 struct drm_crtc *crtc)
2020 struct drm_connector_state *new_con_state;
2021 struct drm_connector *connector;
2022 struct drm_crtc *crtc_from_state;
2024 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2025 crtc_from_state = new_con_state->crtc;
2027 if (crtc_from_state == crtc)
2028 return to_amdgpu_dm_connector(connector);
/*
 * emulated_link_detect - fabricate a sink for a forced connector.
 *
 * Used when userspace forces a connector on but no physical sink was
 * detected: release any previous sink, build sink capabilities matching the
 * connector's signal type, create a new dc_sink, and attempt an EDID read.
 * Note DisplayPort is mapped to SIGNAL_TYPE_VIRTUAL here.
 */
2034 static void emulated_link_detect(struct dc_link *link)
2036 struct dc_sink_init_data sink_init_data = { 0 };
2037 struct display_sink_capability sink_caps = { 0 };
2038 enum dc_edid_status edid_status;
2039 struct dc_context *dc_ctx = link->ctx;
2040 struct dc_sink *sink = NULL;
2041 struct dc_sink *prev_sink = NULL;
2043 link->type = dc_connection_none;
2044 prev_sink = link->local_sink;
2047 dc_sink_release(prev_sink);
2049 switch (link->connector_signal) {
2050 case SIGNAL_TYPE_HDMI_TYPE_A: {
2051 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2052 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2056 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2057 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2058 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2062 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2063 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2064 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2068 case SIGNAL_TYPE_LVDS: {
2069 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2070 sink_caps.signal = SIGNAL_TYPE_LVDS;
2074 case SIGNAL_TYPE_EDP: {
2075 sink_caps.transaction_type =
2076 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2077 sink_caps.signal = SIGNAL_TYPE_EDP;
2081 case SIGNAL_TYPE_DISPLAY_PORT: {
2082 sink_caps.transaction_type =
2083 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2084 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2089 DC_ERROR("Invalid connector type! signal:%d\n",
2090 link->connector_signal);
2094 sink_init_data.link = link;
2095 sink_init_data.sink_signal = sink_caps.signal;
2097 sink = dc_sink_create(&sink_init_data);
2099 DC_ERROR("Failed to create sink!\n");
2103 /* dc_sink_create returns a new reference */
2104 link->local_sink = sink;
2106 edid_status = dm_helpers_read_local_edid(
2111 if (edid_status != EDID_OK)
2112 DC_ERROR("Failed to read EDID");
/*
 * dm_gpureset_commit_state - re-commit all surfaces of a cached DC state
 * after GPU reset, forcing a full update on every plane so the hardware is
 * fully reprogrammed.  The update bundle is heap-allocated because the
 * per-surface arrays are too large for the stack.
 */
2116 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2117 struct amdgpu_display_manager *dm)
2120 struct dc_surface_update surface_updates[MAX_SURFACES];
2121 struct dc_plane_info plane_infos[MAX_SURFACES];
2122 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2123 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2124 struct dc_stream_update stream_update;
2128 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2131 dm_error("Failed to allocate update bundle\n");
2135 for (k = 0; k < dc_state->stream_count; k++) {
2136 bundle->stream_update.stream = dc_state->streams[k];
2138 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2139 bundle->surface_updates[m].surface =
2140 dc_state->stream_status->plane_states[m];
/* Force a full (not partial) update so everything is reprogrammed. */
2141 bundle->surface_updates[m].surface->force_full_update =
2144 dc_commit_updates_for_stream(
2145 dm->dc, bundle->surface_updates,
2146 dc_state->stream_status->plane_count,
2147 dc_state->streams[k], &bundle->stream_update, dc_state);
/*
 * dm_set_dpms_off - push a dpms_off stream update for @link's active stream.
 * Looks up the stream currently driving the link and commits a single
 * stream update with dpms_off = true, under dm->dc_lock.  No-op (with a
 * debug message) if no stream is found.
 */
2156 static void dm_set_dpms_off(struct dc_link *link)
2158 struct dc_stream_state *stream_state;
2159 struct amdgpu_dm_connector *aconnector = link->priv;
2160 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2161 struct dc_stream_update stream_update;
2162 bool dpms_off = true;
2164 memset(&stream_update, 0, sizeof(stream_update));
2165 stream_update.dpms_off = &dpms_off;
2167 mutex_lock(&adev->dm.dc_lock);
2168 stream_state = dc_stream_find_from_link(link);
2170 if (stream_state == NULL) {
2171 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2172 mutex_unlock(&adev->dm.dc_lock);
2176 stream_update.stream = stream_state;
2177 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2178 stream_state, &stream_update,
2179 stream_state->ctx->dc->current_state);
2180 mutex_unlock(&adev->dm.dc_lock);
/*
 * dm_resume - IP-block resume hook.
 *
 * GPU-reset path: re-init DMUB, power DC to D0, resume IRQs, mark every
 * cached stream/plane for full update, re-validate and re-commit the cached
 * DC state, then drop the cache.  Normal S3 path: rebuild the private DC
 * state, re-init DMUB, power on DC, resume IRQs and MST, re-detect every
 * connector, release stale stream/plane references from the cached atomic
 * state, and restore it with the DRM atomic helper.
 */
2183 static int dm_resume(void *handle)
2185 struct amdgpu_device *adev = handle;
2186 struct drm_device *ddev = adev_to_drm(adev);
2187 struct amdgpu_display_manager *dm = &adev->dm;
2188 struct amdgpu_dm_connector *aconnector;
2189 struct drm_connector *connector;
2190 struct drm_connector_list_iter iter;
2191 struct drm_crtc *crtc;
2192 struct drm_crtc_state *new_crtc_state;
2193 struct dm_crtc_state *dm_new_crtc_state;
2194 struct drm_plane *plane;
2195 struct drm_plane_state *new_plane_state;
2196 struct dm_plane_state *dm_new_plane_state;
2197 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2198 enum dc_connection_type new_connection_type = dc_connection_none;
2199 struct dc_state *dc_state;
2202 if (amdgpu_in_reset(adev)) {
2203 dc_state = dm->cached_dc_state;
2205 r = dm_dmub_hw_init(adev);
2207 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2209 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2212 amdgpu_dm_irq_resume_early(adev);
/* Force full reprogramming of every cached stream and plane. */
2214 for (i = 0; i < dc_state->stream_count; i++) {
2215 dc_state->streams[i]->mode_changed = true;
2216 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2217 dc_state->stream_status->plane_states[j]->update_flags.raw
2221 #if defined(CONFIG_DRM_AMD_DC_DCN)
2223 * Resource allocation happens for link encoders for newer ASIC in
2224 * dc_validate_global_state, so we need to revalidate it.
2226 * This shouldn't fail (it passed once before), so warn if it does.
2228 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2231 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2233 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2235 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2237 dc_release_state(dm->cached_dc_state);
2238 dm->cached_dc_state = NULL;
2240 amdgpu_dm_irq_resume_late(adev);
2242 mutex_unlock(&dm->dc_lock);
2246 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2247 dc_release_state(dm_state->context);
2248 dm_state->context = dc_create_state(dm->dc);
2249 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2250 dc_resource_state_construct(dm->dc, dm_state->context);
2252 /* Before powering on DC we need to re-initialize DMUB. */
2253 r = dm_dmub_hw_init(adev);
2255 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2257 /* power on hardware */
2258 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2260 /* program HPD filter */
2264 * early enable HPD Rx IRQ, should be done before set mode as short
2265 * pulse interrupts are used for MST
2267 amdgpu_dm_irq_resume_early(adev);
2269 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2270 s3_handle_mst(ddev, false);
/* Re-run detection on every non-MST-port connector. */
2273 drm_connector_list_iter_begin(ddev, &iter);
2274 drm_for_each_connector_iter(connector, &iter) {
2275 aconnector = to_amdgpu_dm_connector(connector);
2278 * this is the case when traversing through already created
2279 * MST connectors, should be skipped
2281 if (aconnector->mst_port)
2284 mutex_lock(&aconnector->hpd_lock);
2285 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2286 DRM_ERROR("KMS: Failed to detect connector\n");
2288 if (aconnector->base.force && new_connection_type == dc_connection_none)
2289 emulated_link_detect(aconnector->dc_link);
2291 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2293 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2294 aconnector->fake_enable = false;
2296 if (aconnector->dc_sink)
2297 dc_sink_release(aconnector->dc_sink);
2298 aconnector->dc_sink = NULL;
2299 amdgpu_dm_update_connector_after_detect(aconnector);
2300 mutex_unlock(&aconnector->hpd_lock);
2302 drm_connector_list_iter_end(&iter);
2304 /* Force mode set in atomic commit */
2305 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2306 new_crtc_state->active_changed = true;
2309 * atomic_check is expected to create the dc states. We need to release
2310 * them here, since they were duplicated as part of the suspend
2313 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2314 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2315 if (dm_new_crtc_state->stream) {
2316 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2317 dc_stream_release(dm_new_crtc_state->stream);
2318 dm_new_crtc_state->stream = NULL;
2322 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2323 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2324 if (dm_new_plane_state->dc_state) {
2325 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2326 dc_plane_state_release(dm_new_plane_state->dc_state);
2327 dm_new_plane_state->dc_state = NULL;
2331 drm_atomic_helper_resume(ddev, dm->cached_state);
2333 dm->cached_state = NULL;
2335 amdgpu_dm_irq_resume_late(adev);
2337 amdgpu_dm_smu_write_watermarks_table(adev);
2345 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2346 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2347 * the base driver's device list to be initialized and torn down accordingly.
2349 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
/* IP-block hooks that plug the Display Manager (DM) into the amdgpu base
 * driver lifecycle: init/fini, hw bring-up/teardown, suspend/resume, idle
 * polling, soft reset and clock/power gating control.
 * NOTE(review): the closing "};" is on a line elided from this capture. */
2352 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2354 .early_init = dm_early_init,
2355 .late_init = dm_late_init,
2356 .sw_init = dm_sw_init,
2357 .sw_fini = dm_sw_fini,
2358 .early_fini = amdgpu_dm_early_fini,
2359 .hw_init = dm_hw_init,
2360 .hw_fini = dm_hw_fini,
2361 .suspend = dm_suspend,
2362 .resume = dm_resume,
2363 .is_idle = dm_is_idle,
2364 .wait_for_idle = dm_wait_for_idle,
2365 .check_soft_reset = dm_check_soft_reset,
2366 .soft_reset = dm_soft_reset,
2367 .set_clockgating_state = dm_set_clockgating_state,
2368 .set_powergating_state = dm_set_powergating_state,
/* IP block descriptor registering DM as the DCE (display) IP with the base
 * driver. NOTE(review): version fields (major/minor/rev) sit on lines elided
 * from this capture — confirm against the full file. */
2371 const struct amdgpu_ip_block_version dm_ip_block =
2373 .type = AMD_IP_BLOCK_TYPE_DCE,
2377 .funcs = &amdgpu_dm_funcs,
/* drm_mode_config_funcs: framebuffer creation, format info, output-poll
 * notification, and the atomic check/commit entry points for this driver. */
2387 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2388 .fb_create = amdgpu_display_user_framebuffer_create,
2389 .get_format_info = amd_get_format_info,
2390 .output_poll_changed = drm_fb_helper_output_poll_changed,
2391 .atomic_check = amdgpu_dm_atomic_check,
2392 .atomic_commit = drm_atomic_helper_commit,
/* Mode-config helper vtable: route the atomic commit tail into DM so DC
 * programs the hardware state. */
2395 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2396 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
/* Refresh the backlight capability record for an eDP connector from its DPCD
 * extended caps and the sink's HDR metadata (max_cll/min_cll). Decides whether
 * AUX-based backlight control is used (OLED panels, or forced by the
 * amdgpu_backlight module parameter) and derives aux min/max input signal
 * levels in nits from the CTA-861-G luminance code values.
 * Bails out early for non-eDP links or when no matching backlight_link entry
 * exists (early-return lines are elided in this capture). */
2399 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2401 u32 max_cll, min_cll, max, min, q, r;
2402 struct amdgpu_dm_backlight_caps *caps;
2403 struct amdgpu_display_manager *dm;
2404 struct drm_connector *conn_base;
2405 struct amdgpu_device *adev;
2406 struct dc_link *link = NULL;
/* Pre-computed round(50 * 2^(r/32)) for r = 0..31; see derivation below. */
2407 static const u8 pre_computed_values[] = {
2408 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2409 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2412 if (!aconnector || !aconnector->dc_link)
2415 link = aconnector->dc_link;
/* Only eDP panels have driver-controlled backlight caps. */
2416 if (link->connector_signal != SIGNAL_TYPE_EDP)
2419 conn_base = &aconnector->base;
2420 adev = drm_to_adev(conn_base->dev);
/* Find which eDP backlight slot this link belongs to. */
2422 for (i = 0; i < dm->num_of_edps; i++) {
2423 if (link == dm->backlight_link[i])
2426 if (i >= dm->num_of_edps)
2428 caps = &dm->backlight_caps[i];
2429 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2430 caps->aux_support = false;
2431 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2432 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
/* OLED panels use AUX backlight control; the SDR/HDR aux bits are
 * deliberately left commented out. */
2434 if (caps->ext_caps->bits.oled == 1 /*||
2435 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2436 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2437 caps->aux_support = true;
/* Module parameter override: 0 forces PWM, 1 forces AUX. */
2439 if (amdgpu_backlight == 0)
2440 caps->aux_support = false;
2441 else if (amdgpu_backlight == 1)
2442 caps->aux_support = true;
2444 /* From the specification (CTA-861-G), for calculating the maximum
2445 * luminance we need to use:
2446 * Luminance = 50*2**(CV/32)
2447 * Where CV is a one-byte value.
2448 * For calculating this expression we may need float point precision;
2449 * to avoid this complexity level, we take advantage that CV is divided
2450 * by a constant. From the Euclids division algorithm, we know that CV
2451 * can be written as: CV = 32*q + r. Next, we replace CV in the
2452 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2453 * need to pre-compute the value of r/32. For pre-computing the values
2454 * We just used the following Ruby line:
2455 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2456 * The results of the above expressions can be verified at
2457 * pre_computed_values.
/* NOTE(review): q = max_cll / 32 and r = max_cll % 32 are assigned on lines
 * elided from this capture — confirm against the full file. */
2461 max = (1 << q) * pre_computed_values[r];
2463 // min luminance: maxLum * (CV/255)^2 / 100
2464 q = DIV_ROUND_CLOSEST(min_cll, 255);
2465 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2467 caps->aux_max_input_signal = max;
2468 caps->aux_min_input_signal = min;
/* Synchronize the DRM connector state with the result of a DC link detect:
 * adopt (or drop) the link's local_sink, update the EDID property, FreeSync
 * caps, CEC EDID registration and (optionally) HDCP content-protection state.
 * Reference counting discipline: every dc_sink pointer stored in
 * aconnector->dc_sink holds a reference (dc_sink_retain), and the temporary
 * 'sink' reference taken at entry is released on every exit path. */
2471 void amdgpu_dm_update_connector_after_detect(
2472 struct amdgpu_dm_connector *aconnector)
2474 struct drm_connector *connector = &aconnector->base;
2475 struct drm_device *dev = connector->dev;
2476 struct dc_sink *sink;
2478 /* MST handled by drm_mst framework */
2479 if (aconnector->mst_mgr.mst_state == true)
2482 sink = aconnector->dc_link->local_sink;
/* Hold a temporary reference on the detected sink for the duration of
 * this function (released before each return). */
2484 dc_sink_retain(sink);
2487 * Edid mgmt connector gets first update only in mode_valid hook and then
2488 * the connector sink is set to either fake or physical sink depends on link status.
2489 * Skip if already done during boot.
2491 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2492 && aconnector->dc_em_sink) {
2495 * For S3 resume with headless use eml_sink to fake stream
2496 * because on resume connector->sink is set to NULL
2498 mutex_lock(&dev->mode_config.mutex);
2501 if (aconnector->dc_sink) {
2502 amdgpu_dm_update_freesync_caps(connector, NULL);
2504 * retain and release below are used to
2505 * bump up refcount for sink because the link doesn't point
2506 * to it anymore after disconnect, so on next crtc to connector
2507 * reshuffle by UMD we will get into unwanted dc_sink release
2509 dc_sink_release(aconnector->dc_sink);
2511 aconnector->dc_sink = sink;
2512 dc_sink_retain(aconnector->dc_sink);
2513 amdgpu_dm_update_freesync_caps(connector,
2516 amdgpu_dm_update_freesync_caps(connector, NULL);
/* No physical sink: fall back to the emulated sink. */
2517 if (!aconnector->dc_sink) {
2518 aconnector->dc_sink = aconnector->dc_em_sink;
2519 dc_sink_retain(aconnector->dc_sink);
2523 mutex_unlock(&dev->mode_config.mutex);
2526 dc_sink_release(sink);
2531 * TODO: temporary guard to look for proper fix
2532 * if this sink is MST sink, we should not do anything
2534 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2535 dc_sink_release(sink);
/* Sink unchanged (e.g. DP short pulse): nothing to update. */
2539 if (aconnector->dc_sink == sink) {
2541 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2544 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2545 aconnector->connector_id);
2547 dc_sink_release(sink);
2551 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2552 aconnector->connector_id, aconnector->dc_sink, sink);
2554 mutex_lock(&dev->mode_config.mutex);
2557 * 1. Update status of the drm connector
2558 * 2. Send an event and let userspace tell us what to do
2562 * TODO: check if we still need the S3 mode update workaround.
2563 * If yes, put it here.
/* Drop the previous sink (and its FreeSync caps) before adopting the
 * new one. */
2565 if (aconnector->dc_sink) {
2566 amdgpu_dm_update_freesync_caps(connector, NULL);
2567 dc_sink_release(aconnector->dc_sink);
2570 aconnector->dc_sink = sink;
2571 dc_sink_retain(aconnector->dc_sink);
/* Empty EDID from the sink: clear the cached EDID and CEC binding. */
2572 if (sink->dc_edid.length == 0) {
2573 aconnector->edid = NULL;
2574 if (aconnector->dc_link->aux_mode) {
2575 drm_dp_cec_unset_edid(
2576 &aconnector->dm_dp_aux.aux);
2580 (struct edid *)sink->dc_edid.raw_edid;
2582 drm_connector_update_edid_property(connector,
2584 if (aconnector->dc_link->aux_mode)
2585 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2589 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2590 update_connector_ext_caps(aconnector);
/* Disconnect path: tear down EDID/FreeSync/CEC state and drop the sink. */
2592 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2593 amdgpu_dm_update_freesync_caps(connector, NULL);
2594 drm_connector_update_edid_property(connector, NULL);
2595 aconnector->num_modes = 0;
2596 dc_sink_release(aconnector->dc_sink);
2597 aconnector->dc_sink = NULL;
2598 aconnector->edid = NULL;
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2601 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2602 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2606 mutex_unlock(&dev->mode_config.mutex);
2608 update_subconnector_property(aconnector);
/* Release the temporary reference taken at entry. */
2611 dc_sink_release(sink);
/* Deferred HPD (hot-plug detect) long-pulse handler, invoked with the
 * registered connector as 'param'. Re-detects the link (or emulates detection
 * for forced connectors), updates connector state under hpd_lock, restores
 * DRM connector state, and notifies userspace via a hotplug event. */
2614 static void handle_hpd_irq(void *param)
2616 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2617 struct drm_connector *connector = &aconnector->base;
2618 struct drm_device *dev = connector->dev;
2619 enum dc_connection_type new_connection_type = dc_connection_none;
2620 struct amdgpu_device *adev = drm_to_adev(dev);
2621 #ifdef CONFIG_DRM_AMD_DC_HDCP
2622 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
/* HPD processing globally disabled (e.g. during certain transitions). */
2625 if (adev->dm.disable_hpd_irq)
2629 * In case of failure or MST no need to update connector status or notify the OS
2630 * since (for MST case) MST does this in its own context.
2632 mutex_lock(&aconnector->hpd_lock);
2634 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* Any plug/unplug invalidates the HDCP session; mark it for re-enable. */
2635 if (adev->dm.hdcp_workqueue) {
2636 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2637 dm_con_state->update_hdcp = true;
2640 if (aconnector->fake_enable)
2641 aconnector->fake_enable = false;
2643 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2644 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connector with nothing physically attached: emulate detection. */
2646 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2647 emulated_link_detect(aconnector->dc_link);
2650 drm_modeset_lock_all(dev);
2651 dm_restore_drm_connector_state(dev, connector);
2652 drm_modeset_unlock_all(dev);
2654 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2655 drm_kms_helper_hotplug_event(dev);
2657 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
/* Both old and new state disconnected: power the link down. */
2658 if (new_connection_type == dc_connection_none &&
2659 aconnector->dc_link->type == dc_connection_none)
2660 dm_set_dpms_off(aconnector->dc_link);
2662 amdgpu_dm_update_connector_after_detect(aconnector);
2664 drm_modeset_lock_all(dev);
2665 dm_restore_drm_connector_state(dev, connector);
2666 drm_modeset_unlock_all(dev);
2668 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2669 drm_kms_helper_hotplug_event(dev);
2671 mutex_unlock(&aconnector->hpd_lock);
/* Service MST sideband messages signalled by an HPD_RX short pulse: read the
 * ESI (event status indicator) bytes over the DPCD AUX channel, hand them to
 * the DRM MST topology manager, ACK handled IRQs back to the sink, and loop
 * (bounded by max_process_count) while new events keep arriving. */
2675 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2677 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2679 bool new_irq_handled = false;
2681 int dpcd_bytes_to_read;
/* Safety bound so a misbehaving sink cannot wedge us in the loop. */
2683 const int max_process_count = 30;
2684 int process_count = 0;
2686 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
/* Pre-DP1.2 sinks expose the IRQ vector at 0x200; DP1.2+ use the ESI
 * block at 0x2002. */
2688 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2689 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2690 /* DPCD 0x200 - 0x201 for downstream IRQ */
2691 dpcd_addr = DP_SINK_COUNT;
2693 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2694 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2695 dpcd_addr = DP_SINK_COUNT_ESI;
2698 dret = drm_dp_dpcd_read(
2699 &aconnector->dm_dp_aux.aux,
2702 dpcd_bytes_to_read);
2704 while (dret == dpcd_bytes_to_read &&
2705 process_count < max_process_count) {
2711 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2712 /* handle HPD short pulse irq */
2713 if (aconnector->mst_mgr.mst_state)
2715 &aconnector->mst_mgr,
2719 if (new_irq_handled) {
2720 /* ACK at DPCD to notify down stream */
2721 const int ack_dpcd_bytes_to_write =
2722 dpcd_bytes_to_read - 1;
/* Retry the ACK write a few times; AUX transactions may fail
 * transiently. */
2724 for (retry = 0; retry < 3; retry++) {
2727 wret = drm_dp_dpcd_write(
2728 &aconnector->dm_dp_aux.aux,
2731 ack_dpcd_bytes_to_write);
2732 if (wret == ack_dpcd_bytes_to_write)
2736 /* check if there is new irq to be handled */
2737 dret = drm_dp_dpcd_read(
2738 &aconnector->dm_dp_aux.aux,
2741 dpcd_bytes_to_read);
2743 new_irq_handled = false;
2749 if (process_count == max_process_count)
2750 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
/* Deferred HPD_RX (DP short-pulse) handler. Reads the HPD IRQ data from the
 * sink, dispatches MST up-request/down-reply servicing, lets DC handle link
 * loss / automated-test IRQs (under dc_lock when appropriate), and — for
 * non-MST-root connectors whose downstream port status changed — re-detects
 * the link and emits a hotplug event. Also forwards CP_IRQ to the HDCP
 * workqueue and CEC IRQs to the DP CEC helper. */
2753 static void handle_hpd_rx_irq(void *param)
2755 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2756 struct drm_connector *connector = &aconnector->base;
2757 struct drm_device *dev = connector->dev;
2758 struct dc_link *dc_link = aconnector->dc_link;
2759 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2760 bool result = false;
2761 enum dc_connection_type new_connection_type = dc_connection_none;
2762 struct amdgpu_device *adev = drm_to_adev(dev);
2763 union hpd_irq_data hpd_irq_data;
2766 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2768 if (adev->dm.disable_hpd_irq)
2773 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2774 * conflict, after implement i2c helper, this mutex should be
2777 mutex_lock(&aconnector->hpd_lock);
2779 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
/* Only service sideband messages on trained links or MST branches. */
2781 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2782 (dc_link->type == dc_connection_mst_branch)) {
2783 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2785 dm_handle_hpd_rx_irq(aconnector);
2787 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2789 dm_handle_hpd_rx_irq(aconnector);
2795 * TODO: We need the lock to avoid touching DC state while it's being
2796 * modified during automated compliance testing, or when link loss
2797 * happens. While this should be split into subhandlers and proper
2798 * interfaces to avoid having to conditionally lock like this in the
2799 * outer layer, we need this workaround temporarily to allow MST
2800 * lightup in some scenarios to avoid timeout.
2802 if (!amdgpu_in_reset(adev) &&
2803 (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2804 hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2805 mutex_lock(&adev->dm.dc_lock);
2809 #ifdef CONFIG_DRM_AMD_DC_HDCP
2810 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2812 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
/* NOTE(review): lock_flag appears to be set on lines elided from this
 * capture; it mirrors the conditional dc_lock acquisition above. */
2814 if (!amdgpu_in_reset(adev) && lock_flag)
2815 mutex_unlock(&adev->dm.dc_lock);
2818 if (result && !is_mst_root_connector) {
2819 /* Downstream Port status changed. */
2820 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2821 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connector with nothing attached: emulate detection. */
2823 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2824 emulated_link_detect(dc_link);
2826 if (aconnector->fake_enable)
2827 aconnector->fake_enable = false;
2829 amdgpu_dm_update_connector_after_detect(aconnector);
2832 drm_modeset_lock_all(dev);
2833 dm_restore_drm_connector_state(dev, connector);
2834 drm_modeset_unlock_all(dev);
2836 drm_kms_helper_hotplug_event(dev);
2837 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2839 if (aconnector->fake_enable)
2840 aconnector->fake_enable = false;
2842 amdgpu_dm_update_connector_after_detect(aconnector);
2845 drm_modeset_lock_all(dev);
2846 dm_restore_drm_connector_state(dev, connector);
2847 drm_modeset_unlock_all(dev);
2849 drm_kms_helper_hotplug_event(dev);
2852 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* Content-protection IRQ: hand off to the HDCP workqueue. */
2853 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2854 if (adev->dm.hdcp_workqueue)
2855 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
/* CEC over DP AUX only applies to non-MST-branch links. */
2859 if (dc_link->type != dc_connection_mst_branch)
2860 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2862 mutex_unlock(&aconnector->hpd_lock);
/* Walk every DRM connector and register low-context interrupt handlers for
 * its DC link's HPD (long pulse) and HPD_RX (DP short pulse) IRQ sources.
 * The handler callback arguments sit on lines elided from this capture
 * (handle_hpd_irq / handle_hpd_rx_irq in the full file — confirm there). */
2865 static void register_hpd_handlers(struct amdgpu_device *adev)
2867 struct drm_device *dev = adev_to_drm(adev);
2868 struct drm_connector *connector;
2869 struct amdgpu_dm_connector *aconnector;
2870 const struct dc_link *dc_link;
2871 struct dc_interrupt_params int_params = {0};
2873 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2874 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2876 list_for_each_entry(connector,
2877 &dev->mode_config.connector_list, head) {
2879 aconnector = to_amdgpu_dm_connector(connector);
2880 dc_link = aconnector->dc_link;
/* Register the HPD (plug/unplug) handler if the link has one. */
2882 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2883 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2884 int_params.irq_source = dc_link->irq_source_hpd;
2886 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2888 (void *) aconnector);
2891 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2893 /* Also register for DP short pulse (hpd_rx). */
2894 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2895 int_params.irq_source = dc_link->irq_source_hpd_rx;
2897 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2899 (void *) aconnector);
2904 #if defined(CONFIG_DRM_AMD_DC_SI)
2905 /* Register IRQ sources and initialize IRQ callbacks */
/* DCE6 (SI ASICs): register VBLANK and pageflip IRQ sources with the base
 * driver and wire them to the DM high-IRQ handlers, then register HPD.
 * Returns 0 on success or the amdgpu_irq_add_id() error (return statements
 * are on lines elided from this capture). */
2906 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2908 struct dc *dc = adev->dm.dc;
2909 struct common_irq_params *c_irq_params;
2910 struct dc_interrupt_params int_params = {0};
2913 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2915 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2916 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2919 * Actions of amdgpu_irq_add_id():
2920 * 1. Register a set() function with base driver.
2921 * Base driver will call set() function to enable/disable an
2922 * interrupt in DC hardware.
2923 * 2. Register amdgpu_dm_irq_handler().
2924 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2925 * coming from DC hardware.
2926 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2927 * for acknowledging and handling. */
2929 /* Use VBLANK interrupt */
2930 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2931 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2933 DRM_ERROR("Failed to add crtc irq id!\n");
2937 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2938 int_params.irq_source =
2939 dc_interrupt_to_irq_source(dc, i+1 , 0);
2941 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2943 c_irq_params->adev = adev;
2944 c_irq_params->irq_src = int_params.irq_source;
2946 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2947 dm_crtc_high_irq, c_irq_params);
2950 /* Use GRPH_PFLIP interrupt */
2951 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2952 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2953 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2955 DRM_ERROR("Failed to add page flip irq id!\n");
2959 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2960 int_params.irq_source =
2961 dc_interrupt_to_irq_source(dc, i, 0);
2963 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2965 c_irq_params->adev = adev;
2966 c_irq_params->irq_src = int_params.irq_source;
2968 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2969 dm_pflip_high_irq, c_irq_params);
/* HPD uses a single IRQ id; per-link handlers attach below. */
2974 r = amdgpu_irq_add_id(adev, client_id,
2975 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2977 DRM_ERROR("Failed to add hpd irq id!\n");
2981 register_hpd_handlers(adev);
2987 /* Register IRQ sources and initialize IRQ callbacks */
/* DCE8/10/11/12-class ASICs: register VBLANK, VUPDATE and pageflip IRQ
 * sources (SOC15 client id for Vega and newer) and wire them to the DM
 * high-IRQ handlers, then register HPD. Returns 0 on success or the
 * amdgpu_irq_add_id() error (returns are on elided lines). */
2988 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2990 struct dc *dc = adev->dm.dc;
2991 struct common_irq_params *c_irq_params;
2992 struct dc_interrupt_params int_params = {0};
2995 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
/* Vega and later route display interrupts through the SOC15 DCE client. */
2997 if (adev->asic_type >= CHIP_VEGA10)
2998 client_id = SOC15_IH_CLIENTID_DCE;
3000 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3001 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3004 * Actions of amdgpu_irq_add_id():
3005 * 1. Register a set() function with base driver.
3006 * Base driver will call set() function to enable/disable an
3007 * interrupt in DC hardware.
3008 * 2. Register amdgpu_dm_irq_handler().
3009 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3010 * coming from DC hardware.
3011 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3012 * for acknowledging and handling. */
3014 /* Use VBLANK interrupt */
3015 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3016 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3018 DRM_ERROR("Failed to add crtc irq id!\n");
3022 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3023 int_params.irq_source =
3024 dc_interrupt_to_irq_source(dc, i, 0);
3026 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3028 c_irq_params->adev = adev;
3029 c_irq_params->irq_src = int_params.irq_source;
3031 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3032 dm_crtc_high_irq, c_irq_params);
3035 /* Use VUPDATE interrupt */
3036 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3037 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3039 DRM_ERROR("Failed to add vupdate irq id!\n");
3043 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3044 int_params.irq_source =
3045 dc_interrupt_to_irq_source(dc, i, 0);
3047 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3049 c_irq_params->adev = adev;
3050 c_irq_params->irq_src = int_params.irq_source;
3052 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3053 dm_vupdate_high_irq, c_irq_params);
3056 /* Use GRPH_PFLIP interrupt */
3057 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3058 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3059 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3061 DRM_ERROR("Failed to add page flip irq id!\n");
3065 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3066 int_params.irq_source =
3067 dc_interrupt_to_irq_source(dc, i, 0);
3069 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3071 c_irq_params->adev = adev;
3072 c_irq_params->irq_src = int_params.irq_source;
3074 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3075 dm_pflip_high_irq, c_irq_params);
/* HPD uses a single IRQ id; per-link handlers attach below. */
3080 r = amdgpu_irq_add_id(adev, client_id,
3081 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3083 DRM_ERROR("Failed to add hpd irq id!\n");
3087 register_hpd_handlers(adev);
3092 #if defined(CONFIG_DRM_AMD_DC_DCN)
3093 /* Register IRQ sources and initialize IRQ callbacks */
/* DCN ASICs: register VSTARTUP (used as vblank), optional OTG vertical-line-0
 * (secure display), VUPDATE_NO_LOCK and HUBP pageflip IRQ sources with the
 * SOC15 DCE client and wire them to the DM high-IRQ handlers, then register
 * HPD. Returns 0 on success or the registration error (returns are on lines
 * elided from this capture). */
3094 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3096 struct dc *dc = adev->dm.dc;
3097 struct common_irq_params *c_irq_params;
3098 struct dc_interrupt_params int_params = {0};
3101 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* One OTG vertical-interrupt-0 source id per CRTC, indexed by crtc. */
3102 static const unsigned int vrtl_int_srcid[] = {
3103 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3104 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3105 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3106 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3107 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3108 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3112 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3113 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3116 * Actions of amdgpu_irq_add_id():
3117 * 1. Register a set() function with base driver.
3118 * Base driver will call set() function to enable/disable an
3119 * interrupt in DC hardware.
3120 * 2. Register amdgpu_dm_irq_handler().
3121 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3122 * coming from DC hardware.
3123 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3124 * for acknowledging and handling.
3127 /* Use VSTARTUP interrupt */
3128 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3129 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3131 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3134 DRM_ERROR("Failed to add crtc irq id!\n");
3138 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3139 int_params.irq_source =
3140 dc_interrupt_to_irq_source(dc, i, 0);
3142 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3144 c_irq_params->adev = adev;
3145 c_irq_params->irq_src = int_params.irq_source;
3147 amdgpu_dm_irq_register_interrupt(
3148 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3151 /* Use otg vertical line interrupt */
3152 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3153 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3154 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3155 vrtl_int_srcid[i], &adev->vline0_irq);
3158 DRM_ERROR("Failed to add vline0 irq id!\n");
3162 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3163 int_params.irq_source =
3164 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3166 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3167 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3171 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3172 - DC_IRQ_SOURCE_DC1_VLINE0];
3174 c_irq_params->adev = adev;
3175 c_irq_params->irq_src = int_params.irq_source;
3177 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3178 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3182 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3183 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3184 * to trigger at end of each vblank, regardless of state of the lock,
3185 * matching DCE behaviour.
3187 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3188 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3190 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3193 DRM_ERROR("Failed to add vupdate irq id!\n");
3197 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3198 int_params.irq_source =
3199 dc_interrupt_to_irq_source(dc, i, 0);
3201 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3203 c_irq_params->adev = adev;
3204 c_irq_params->irq_src = int_params.irq_source;
3206 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3207 dm_vupdate_high_irq, c_irq_params);
3210 /* Use GRPH_PFLIP interrupt */
3211 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3212 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3214 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3216 DRM_ERROR("Failed to add page flip irq id!\n");
3220 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3221 int_params.irq_source =
3222 dc_interrupt_to_irq_source(dc, i, 0);
3224 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3226 c_irq_params->adev = adev;
3227 c_irq_params->irq_src = int_params.irq_source;
3229 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3230 dm_pflip_high_irq, c_irq_params);
/* HPD uses a single IRQ id; per-link handlers attach below. */
3235 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3238 DRM_ERROR("Failed to add hpd irq id!\n");
3242 register_hpd_handlers(adev);
3246 /* Register Outbox IRQ sources and initialize IRQ callbacks */
/* Register the DMUB outbox (low-priority-ready) IRQ and, when a DMUB service
 * exists, bind it to dm_dmub_outbox1_low_irq in low IRQ context. Returns 0
 * on success or the amdgpu_irq_add_id() error (returns are on elided lines). */
3247 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3249 struct dc *dc = adev->dm.dc;
3250 struct common_irq_params *c_irq_params;
3251 struct dc_interrupt_params int_params = {0};
3254 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3255 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3257 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3258 &adev->dmub_outbox_irq);
3260 DRM_ERROR("Failed to add outbox irq id!\n");
/* Only hook the handler when the DMUB firmware service is present. */
3264 if (dc->ctx->dmub_srv) {
3265 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3266 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3267 int_params.irq_source =
3268 dc_interrupt_to_irq_source(dc, i, 0);
3270 c_irq_params = &adev->dm.dmub_outbox_params[0];
3272 c_irq_params->adev = adev;
3273 c_irq_params->irq_src = int_params.irq_source;
3275 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3276 dm_dmub_outbox1_low_irq, c_irq_params);
3284 * Acquires the lock for the atomic state object and returns
3285 * the new atomic state.
3287 * This should only be called during atomic check.
/* Fetch (locking as needed) the DM private object state from an atomic
 * state, storing it in *dm_state. Propagates any error from
 * drm_atomic_get_private_obj_state(); only valid during atomic check. */
3289 static int dm_atomic_get_state(struct drm_atomic_state *state,
3290 struct dm_atomic_state **dm_state)
3292 struct drm_device *dev = state->dev;
3293 struct amdgpu_device *adev = drm_to_adev(dev);
3294 struct amdgpu_display_manager *dm = &adev->dm;
3295 struct drm_private_state *priv_state;
3300 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3301 if (IS_ERR(priv_state))
3302 return PTR_ERR(priv_state);
3304 *dm_state = to_dm_atomic_state(priv_state);
/* Scan the atomic state's private objects for DM's atomic_obj and return its
 * new state, or NULL if it is not part of this commit (the NULL return is on
 * a line elided from this capture). Does not take locks. */
3309 static struct dm_atomic_state *
3310 dm_atomic_get_new_state(struct drm_atomic_state *state)
3312 struct drm_device *dev = state->dev;
3313 struct amdgpu_device *adev = drm_to_adev(dev);
3314 struct amdgpu_display_manager *dm = &adev->dm;
3315 struct drm_private_obj *obj;
3316 struct drm_private_state *new_obj_state;
3319 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
/* Identify our object by its funcs pointer. */
3320 if (obj->funcs == dm->atomic_obj.funcs)
3321 return to_dm_atomic_state(new_obj_state);
/* .atomic_duplicate_state hook: allocate a new DM private state and deep-copy
 * the current DC context into it via dc_copy_state(). Error paths (alloc or
 * copy failure) are on lines elided from this capture. */
3327 static struct drm_private_state *
3328 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3330 struct dm_atomic_state *old_state, *new_state;
3332 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3336 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3338 old_state = to_dm_atomic_state(obj->state);
3340 if (old_state && old_state->context)
3341 new_state->context = dc_copy_state(old_state->context);
3343 if (!new_state->context) {
3348 return &new_state->base;
/* .atomic_destroy_state hook: release the DC context held by the DM private
 * state (the kfree of the state itself is on a line elided from this
 * capture). */
3351 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3352 struct drm_private_state *state)
3354 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3356 if (dm_state && dm_state->context)
3357 dc_release_state(dm_state->context);
/* Private-object vtable for DM's atomic state (duplicate/destroy). */
3362 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3363 .atomic_duplicate_state = dm_atomic_duplicate_state,
3364 .atomic_destroy_state = dm_atomic_destroy_state,
/* Initialize DRM mode config for DM: limits and flags, the initial DC state
 * wrapped in a private atomic object, modeset properties and audio. On
 * property/audio failure the DC state is released (full error unwinding is
 * on lines elided from this capture). */
3367 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3369 struct dm_atomic_state *state;
3372 adev->mode_info.mode_config_initialized = true;
3374 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3375 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3377 adev_to_drm(adev)->mode_config.max_width = 16384;
3378 adev_to_drm(adev)->mode_config.max_height = 16384;
3380 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3381 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3382 /* indicates support for immediate flip */
3383 adev_to_drm(adev)->mode_config.async_page_flip = true;
3385 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3387 state = kzalloc(sizeof(*state), GFP_KERNEL);
3391 state->context = dc_create_state(adev->dm.dc);
3392 if (!state->context) {
/* Seed the private state with the current hardware resource state. */
3397 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3399 drm_atomic_private_obj_init(adev_to_drm(adev),
3400 &adev->dm.atomic_obj,
3402 &dm_atomic_state_funcs);
3404 r = amdgpu_display_modeset_create_props(adev);
3406 dc_release_state(state->context);
3411 r = amdgpu_dm_audio_init(adev);
3413 dc_release_state(state->context);
3421 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3422 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3423 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3425 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3426 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* Populate min/max input-signal limits for backlight slot bl_idx. With ACPI,
 * query firmware caps (skipped once caps_valid, and when aux_support is set
 * — the early returns sit on lines elided from this capture); otherwise fall
 * back to the driver defaults. */
3428 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3431 #if defined(CONFIG_ACPI)
3432 struct amdgpu_dm_backlight_caps caps;
3434 memset(&caps, 0, sizeof(caps));
/* Already resolved for this panel — nothing to do. */
3436 if (dm->backlight_caps[bl_idx].caps_valid)
3439 amdgpu_acpi_get_backlight_caps(&caps);
3440 if (caps.caps_valid) {
3441 dm->backlight_caps[bl_idx].caps_valid = true;
3442 if (caps.aux_support)
3444 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3445 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
/* Firmware gave nothing usable: use driver defaults. */
3447 dm->backlight_caps[bl_idx].min_input_signal =
3448 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3449 dm->backlight_caps[bl_idx].max_input_signal =
3450 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
/* Non-ACPI build: defaults only (AUX-controlled panels keep their own). */
3453 if (dm->backlight_caps[bl_idx].aux_support)
3456 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3457 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
/* Derive the effective brightness range for a panel: AUX panels report
 * nits (scaled to millinits for the DC API), PWM panels scale the 8-bit
 * firmware limits to the 16-bit PWM range (0x101 = 0xffff / 0xff).
 * Return value and a caps-validity guard are on lines elided from this
 * capture — confirm against the full file. */
3461 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3462 unsigned *min, unsigned *max)
3467 if (caps->aux_support) {
3468 // Firmware limits are in nits, DC API wants millinits.
3469 *max = 1000 * caps->aux_max_input_signal;
3470 *min = 1000 * caps->aux_min_input_signal;
3472 // Firmware limits are 8-bit, PWM control is 16-bit.
3473 *max = 0x101 * caps->max_input_signal;
3474 *min = 0x101 * caps->min_input_signal;
3479 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3480 uint32_t brightness)
3484 if (!get_brightness_range(caps, &min, &max))
3487 // Rescale 0..255 to min..max
3488 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3489 AMDGPU_MAX_BL_LEVEL);
3492 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3493 uint32_t brightness)
3497 if (!get_brightness_range(caps, &min, &max))
3500 if (brightness < min)
3502 // Rescale min..max to 0..255
3503 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3507 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3509 u32 user_brightness)
3511 struct amdgpu_dm_backlight_caps caps;
3512 struct dc_link *link;
3516 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3517 caps = dm->backlight_caps[bl_idx];
3519 dm->brightness[bl_idx] = user_brightness;
3520 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3521 link = (struct dc_link *)dm->backlight_link[bl_idx];
3523 /* Change brightness based on AUX property */
3524 if (caps.aux_support) {
3525 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3526 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3528 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3530 rc = dc_link_set_backlight_level(link, brightness, 0);
3532 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3538 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3540 struct amdgpu_display_manager *dm = bl_get_data(bd);
3543 for (i = 0; i < dm->num_of_edps; i++) {
3544 if (bd == dm->backlight_dev[i])
3547 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3549 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3554 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3557 struct amdgpu_dm_backlight_caps caps;
3558 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3560 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3561 caps = dm->backlight_caps[bl_idx];
3563 if (caps.aux_support) {
3567 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3569 return dm->brightness[bl_idx];
3570 return convert_brightness_to_user(&caps, avg);
3572 int ret = dc_link_get_backlight_level(link);
3574 if (ret == DC_ERROR_UNEXPECTED)
3575 return dm->brightness[bl_idx];
3576 return convert_brightness_to_user(&caps, ret);
3580 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3582 struct amdgpu_display_manager *dm = bl_get_data(bd);
3585 for (i = 0; i < dm->num_of_edps; i++) {
3586 if (bd == dm->backlight_dev[i])
3589 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3591 return amdgpu_dm_backlight_get_level(dm, i);
3594 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3595 .options = BL_CORE_SUSPENDRESUME,
3596 .get_brightness = amdgpu_dm_backlight_get_brightness,
3597 .update_status = amdgpu_dm_backlight_update_status,
3601 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3604 struct backlight_properties props = { 0 };
3606 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3607 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3609 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3610 props.brightness = AMDGPU_MAX_BL_LEVEL;
3611 props.type = BACKLIGHT_RAW;
3613 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3614 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3616 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3617 adev_to_drm(dm->adev)->dev,
3619 &amdgpu_dm_backlight_ops,
3622 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3623 DRM_ERROR("DM: Backlight registration failed!\n");
3625 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3629 static int initialize_plane(struct amdgpu_display_manager *dm,
3630 struct amdgpu_mode_info *mode_info, int plane_id,
3631 enum drm_plane_type plane_type,
3632 const struct dc_plane_cap *plane_cap)
3634 struct drm_plane *plane;
3635 unsigned long possible_crtcs;
3638 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3640 DRM_ERROR("KMS: Failed to allocate plane\n");
3643 plane->type = plane_type;
3646 * HACK: IGT tests expect that the primary plane for a CRTC
3647 * can only have one possible CRTC. Only expose support for
3648 * any CRTC if they're not going to be used as a primary plane
3649 * for a CRTC - like overlay or underlay planes.
3651 possible_crtcs = 1 << plane_id;
3652 if (plane_id >= dm->dc->caps.max_streams)
3653 possible_crtcs = 0xff;
3655 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3658 DRM_ERROR("KMS: Failed to initialize plane\n");
3664 mode_info->planes[plane_id] = plane;
3670 static void register_backlight_device(struct amdgpu_display_manager *dm,
3671 struct dc_link *link)
3673 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3674 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3676 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3677 link->type != dc_connection_none) {
3679 * Event if registration failed, we should continue with
3680 * DM initialization because not having a backlight control
3681 * is better then a black screen.
3683 if (!dm->backlight_dev[dm->num_of_edps])
3684 amdgpu_dm_register_backlight_device(dm);
3686 if (dm->backlight_dev[dm->num_of_edps]) {
3687 dm->backlight_link[dm->num_of_edps] = link;
3696 * In this architecture, the association
3697 * connector -> encoder -> crtc
3698 * is not really required. The crtc and connector will hold the
3699 * display_index as an abstraction to use with DAL component
3701 * Returns 0 on success
3703 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3705 struct amdgpu_display_manager *dm = &adev->dm;
3707 struct amdgpu_dm_connector *aconnector = NULL;
3708 struct amdgpu_encoder *aencoder = NULL;
3709 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3711 int32_t primary_planes;
3712 enum dc_connection_type new_connection_type = dc_connection_none;
3713 const struct dc_plane_cap *plane;
3715 dm->display_indexes_num = dm->dc->caps.max_streams;
3716 /* Update the actual used number of crtc */
3717 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3719 link_cnt = dm->dc->caps.max_links;
3720 if (amdgpu_dm_mode_config_init(dm->adev)) {
3721 DRM_ERROR("DM: Failed to initialize mode config\n");
3725 /* There is one primary plane per CRTC */
3726 primary_planes = dm->dc->caps.max_streams;
3727 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3730 * Initialize primary planes, implicit planes for legacy IOCTLS.
3731 * Order is reversed to match iteration order in atomic check.
3733 for (i = (primary_planes - 1); i >= 0; i--) {
3734 plane = &dm->dc->caps.planes[i];
3736 if (initialize_plane(dm, mode_info, i,
3737 DRM_PLANE_TYPE_PRIMARY, plane)) {
3738 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3744 * Initialize overlay planes, index starting after primary planes.
3745 * These planes have a higher DRM index than the primary planes since
3746 * they should be considered as having a higher z-order.
3747 * Order is reversed to match iteration order in atomic check.
3749 * Only support DCN for now, and only expose one so we don't encourage
3750 * userspace to use up all the pipes.
3752 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3753 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3755 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3758 if (!plane->blends_with_above || !plane->blends_with_below)
3761 if (!plane->pixel_format_support.argb8888)
3764 if (initialize_plane(dm, NULL, primary_planes + i,
3765 DRM_PLANE_TYPE_OVERLAY, plane)) {
3766 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3770 /* Only create one overlay plane. */
3774 for (i = 0; i < dm->dc->caps.max_streams; i++)
3775 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3776 DRM_ERROR("KMS: Failed to initialize crtc\n");
3780 #if defined(CONFIG_DRM_AMD_DC_DCN)
3781 /* Use Outbox interrupt */
3782 switch (adev->asic_type) {
3783 case CHIP_SIENNA_CICHLID:
3784 case CHIP_NAVY_FLOUNDER:
3785 case CHIP_YELLOW_CARP:
3787 if (register_outbox_irq_handlers(dm->adev)) {
3788 DRM_ERROR("DM: Failed to initialize IRQ\n");
3793 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3797 /* loops over all connectors on the board */
3798 for (i = 0; i < link_cnt; i++) {
3799 struct dc_link *link = NULL;
3801 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3803 "KMS: Cannot support more than %d display indexes\n",
3804 AMDGPU_DM_MAX_DISPLAY_INDEX);
3808 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3812 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3816 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3817 DRM_ERROR("KMS: Failed to initialize encoder\n");
3821 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3822 DRM_ERROR("KMS: Failed to initialize connector\n");
3826 link = dc_get_link_at_index(dm->dc, i);
3828 if (!dc_link_detect_sink(link, &new_connection_type))
3829 DRM_ERROR("KMS: Failed to detect connector\n");
3831 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3832 emulated_link_detect(link);
3833 amdgpu_dm_update_connector_after_detect(aconnector);
3835 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3836 amdgpu_dm_update_connector_after_detect(aconnector);
3837 register_backlight_device(dm, link);
3838 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3839 amdgpu_dm_set_psr_caps(link);
3845 /* Software is initialized. Now we can register interrupt handlers. */
3846 switch (adev->asic_type) {
3847 #if defined(CONFIG_DRM_AMD_DC_SI)
3852 if (dce60_register_irq_handlers(dm->adev)) {
3853 DRM_ERROR("DM: Failed to initialize IRQ\n");
3867 case CHIP_POLARIS11:
3868 case CHIP_POLARIS10:
3869 case CHIP_POLARIS12:
3874 if (dce110_register_irq_handlers(dm->adev)) {
3875 DRM_ERROR("DM: Failed to initialize IRQ\n");
3879 #if defined(CONFIG_DRM_AMD_DC_DCN)
3885 case CHIP_SIENNA_CICHLID:
3886 case CHIP_NAVY_FLOUNDER:
3887 case CHIP_DIMGREY_CAVEFISH:
3888 case CHIP_BEIGE_GOBY:
3890 case CHIP_YELLOW_CARP:
3891 if (dcn10_register_irq_handlers(dm->adev)) {
3892 DRM_ERROR("DM: Failed to initialize IRQ\n");
3898 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3910 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3912 drm_atomic_private_obj_fini(&dm->atomic_obj);
3916 /******************************************************************************
3917 * amdgpu_display_funcs functions
3918 *****************************************************************************/
3921 * dm_bandwidth_update - program display watermarks
3923 * @adev: amdgpu_device pointer
3925 * Calculate and program the display watermarks and line buffer allocation.
3927 static void dm_bandwidth_update(struct amdgpu_device *adev)
3929 /* TODO: implement later */
3932 static const struct amdgpu_display_funcs dm_display_funcs = {
3933 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3934 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3935 .backlight_set_level = NULL, /* never called for DC */
3936 .backlight_get_level = NULL, /* never called for DC */
3937 .hpd_sense = NULL,/* called unconditionally */
3938 .hpd_set_polarity = NULL, /* called unconditionally */
3939 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3940 .page_flip_get_scanoutpos =
3941 dm_crtc_get_scanoutpos,/* called unconditionally */
3942 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3943 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3946 #if defined(CONFIG_DEBUG_KERNEL_DC)
3948 static ssize_t s3_debug_store(struct device *device,
3949 struct device_attribute *attr,
3955 struct drm_device *drm_dev = dev_get_drvdata(device);
3956 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3958 ret = kstrtoint(buf, 0, &s3_state);
3963 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3968 return ret == 0 ? count : 0;
3971 DEVICE_ATTR_WO(s3_debug);
3975 static int dm_early_init(void *handle)
3977 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3979 switch (adev->asic_type) {
3980 #if defined(CONFIG_DRM_AMD_DC_SI)
3984 adev->mode_info.num_crtc = 6;
3985 adev->mode_info.num_hpd = 6;
3986 adev->mode_info.num_dig = 6;
3989 adev->mode_info.num_crtc = 2;
3990 adev->mode_info.num_hpd = 2;
3991 adev->mode_info.num_dig = 2;
3996 adev->mode_info.num_crtc = 6;
3997 adev->mode_info.num_hpd = 6;
3998 adev->mode_info.num_dig = 6;
4001 adev->mode_info.num_crtc = 4;
4002 adev->mode_info.num_hpd = 6;
4003 adev->mode_info.num_dig = 7;
4007 adev->mode_info.num_crtc = 2;
4008 adev->mode_info.num_hpd = 6;
4009 adev->mode_info.num_dig = 6;
4013 adev->mode_info.num_crtc = 6;
4014 adev->mode_info.num_hpd = 6;
4015 adev->mode_info.num_dig = 7;
4018 adev->mode_info.num_crtc = 3;
4019 adev->mode_info.num_hpd = 6;
4020 adev->mode_info.num_dig = 9;
4023 adev->mode_info.num_crtc = 2;
4024 adev->mode_info.num_hpd = 6;
4025 adev->mode_info.num_dig = 9;
4027 case CHIP_POLARIS11:
4028 case CHIP_POLARIS12:
4029 adev->mode_info.num_crtc = 5;
4030 adev->mode_info.num_hpd = 5;
4031 adev->mode_info.num_dig = 5;
4033 case CHIP_POLARIS10:
4035 adev->mode_info.num_crtc = 6;
4036 adev->mode_info.num_hpd = 6;
4037 adev->mode_info.num_dig = 6;
4042 adev->mode_info.num_crtc = 6;
4043 adev->mode_info.num_hpd = 6;
4044 adev->mode_info.num_dig = 6;
4046 #if defined(CONFIG_DRM_AMD_DC_DCN)
4050 adev->mode_info.num_crtc = 4;
4051 adev->mode_info.num_hpd = 4;
4052 adev->mode_info.num_dig = 4;
4056 case CHIP_SIENNA_CICHLID:
4057 case CHIP_NAVY_FLOUNDER:
4058 adev->mode_info.num_crtc = 6;
4059 adev->mode_info.num_hpd = 6;
4060 adev->mode_info.num_dig = 6;
4062 case CHIP_YELLOW_CARP:
4063 adev->mode_info.num_crtc = 4;
4064 adev->mode_info.num_hpd = 4;
4065 adev->mode_info.num_dig = 4;
4068 case CHIP_DIMGREY_CAVEFISH:
4069 adev->mode_info.num_crtc = 5;
4070 adev->mode_info.num_hpd = 5;
4071 adev->mode_info.num_dig = 5;
4073 case CHIP_BEIGE_GOBY:
4074 adev->mode_info.num_crtc = 2;
4075 adev->mode_info.num_hpd = 2;
4076 adev->mode_info.num_dig = 2;
4080 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4084 amdgpu_dm_set_irq_funcs(adev);
4086 if (adev->mode_info.funcs == NULL)
4087 adev->mode_info.funcs = &dm_display_funcs;
4090 * Note: Do NOT change adev->audio_endpt_rreg and
4091 * adev->audio_endpt_wreg because they are initialised in
4092 * amdgpu_device_init()
4094 #if defined(CONFIG_DEBUG_KERNEL_DC)
4096 adev_to_drm(adev)->dev,
4097 &dev_attr_s3_debug);
4103 static bool modeset_required(struct drm_crtc_state *crtc_state,
4104 struct dc_stream_state *new_stream,
4105 struct dc_stream_state *old_stream)
4107 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4110 static bool modereset_required(struct drm_crtc_state *crtc_state)
4112 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4115 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4117 drm_encoder_cleanup(encoder);
4121 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4122 .destroy = amdgpu_dm_encoder_destroy,
4126 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4127 struct drm_framebuffer *fb,
4128 int *min_downscale, int *max_upscale)
4130 struct amdgpu_device *adev = drm_to_adev(dev);
4131 struct dc *dc = adev->dm.dc;
4132 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4133 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4135 switch (fb->format->format) {
4136 case DRM_FORMAT_P010:
4137 case DRM_FORMAT_NV12:
4138 case DRM_FORMAT_NV21:
4139 *max_upscale = plane_cap->max_upscale_factor.nv12;
4140 *min_downscale = plane_cap->max_downscale_factor.nv12;
4143 case DRM_FORMAT_XRGB16161616F:
4144 case DRM_FORMAT_ARGB16161616F:
4145 case DRM_FORMAT_XBGR16161616F:
4146 case DRM_FORMAT_ABGR16161616F:
4147 *max_upscale = plane_cap->max_upscale_factor.fp16;
4148 *min_downscale = plane_cap->max_downscale_factor.fp16;
4152 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4153 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4158 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4159 * scaling factor of 1.0 == 1000 units.
4161 if (*max_upscale == 1)
4162 *max_upscale = 1000;
4164 if (*min_downscale == 1)
4165 *min_downscale = 1000;
4169 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4170 struct dc_scaling_info *scaling_info)
4172 int scale_w, scale_h, min_downscale, max_upscale;
4174 memset(scaling_info, 0, sizeof(*scaling_info));
4176 /* Source is fixed 16.16 but we ignore mantissa for now... */
4177 scaling_info->src_rect.x = state->src_x >> 16;
4178 scaling_info->src_rect.y = state->src_y >> 16;
4181 * For reasons we don't (yet) fully understand a non-zero
4182 * src_y coordinate into an NV12 buffer can cause a
4183 * system hang. To avoid hangs (and maybe be overly cautious)
4184 * let's reject both non-zero src_x and src_y.
4186 * We currently know of only one use-case to reproduce a
4187 * scenario with non-zero src_x and src_y for NV12, which
4188 * is to gesture the YouTube Android app into full screen
4192 state->fb->format->format == DRM_FORMAT_NV12 &&
4193 (scaling_info->src_rect.x != 0 ||
4194 scaling_info->src_rect.y != 0))
4197 scaling_info->src_rect.width = state->src_w >> 16;
4198 if (scaling_info->src_rect.width == 0)
4201 scaling_info->src_rect.height = state->src_h >> 16;
4202 if (scaling_info->src_rect.height == 0)
4205 scaling_info->dst_rect.x = state->crtc_x;
4206 scaling_info->dst_rect.y = state->crtc_y;
4208 if (state->crtc_w == 0)
4211 scaling_info->dst_rect.width = state->crtc_w;
4213 if (state->crtc_h == 0)
4216 scaling_info->dst_rect.height = state->crtc_h;
4218 /* DRM doesn't specify clipping on destination output. */
4219 scaling_info->clip_rect = scaling_info->dst_rect;
4221 /* Validate scaling per-format with DC plane caps */
4222 if (state->plane && state->plane->dev && state->fb) {
4223 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4224 &min_downscale, &max_upscale);
4226 min_downscale = 250;
4227 max_upscale = 16000;
4230 scale_w = scaling_info->dst_rect.width * 1000 /
4231 scaling_info->src_rect.width;
4233 if (scale_w < min_downscale || scale_w > max_upscale)
4236 scale_h = scaling_info->dst_rect.height * 1000 /
4237 scaling_info->src_rect.height;
4239 if (scale_h < min_downscale || scale_h > max_upscale)
4243 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4244 * assume reasonable defaults based on the format.
4251 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4252 uint64_t tiling_flags)
4254 /* Fill GFX8 params */
4255 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4256 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4258 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4259 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4260 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4261 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4262 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4264 /* XXX fix me for VI */
4265 tiling_info->gfx8.num_banks = num_banks;
4266 tiling_info->gfx8.array_mode =
4267 DC_ARRAY_2D_TILED_THIN1;
4268 tiling_info->gfx8.tile_split = tile_split;
4269 tiling_info->gfx8.bank_width = bankw;
4270 tiling_info->gfx8.bank_height = bankh;
4271 tiling_info->gfx8.tile_aspect = mtaspect;
4272 tiling_info->gfx8.tile_mode =
4273 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4274 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4275 == DC_ARRAY_1D_TILED_THIN1) {
4276 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4279 tiling_info->gfx8.pipe_config =
4280 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4284 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4285 union dc_tiling_info *tiling_info)
4287 tiling_info->gfx9.num_pipes =
4288 adev->gfx.config.gb_addr_config_fields.num_pipes;
4289 tiling_info->gfx9.num_banks =
4290 adev->gfx.config.gb_addr_config_fields.num_banks;
4291 tiling_info->gfx9.pipe_interleave =
4292 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4293 tiling_info->gfx9.num_shader_engines =
4294 adev->gfx.config.gb_addr_config_fields.num_se;
4295 tiling_info->gfx9.max_compressed_frags =
4296 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4297 tiling_info->gfx9.num_rb_per_se =
4298 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4299 tiling_info->gfx9.shaderEnable = 1;
4300 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4301 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4302 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4303 adev->asic_type == CHIP_BEIGE_GOBY ||
4304 adev->asic_type == CHIP_YELLOW_CARP ||
4305 adev->asic_type == CHIP_VANGOGH)
4306 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4310 validate_dcc(struct amdgpu_device *adev,
4311 const enum surface_pixel_format format,
4312 const enum dc_rotation_angle rotation,
4313 const union dc_tiling_info *tiling_info,
4314 const struct dc_plane_dcc_param *dcc,
4315 const struct dc_plane_address *address,
4316 const struct plane_size *plane_size)
4318 struct dc *dc = adev->dm.dc;
4319 struct dc_dcc_surface_param input;
4320 struct dc_surface_dcc_cap output;
4322 memset(&input, 0, sizeof(input));
4323 memset(&output, 0, sizeof(output));
4328 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4329 !dc->cap_funcs.get_dcc_compression_cap)
4332 input.format = format;
4333 input.surface_size.width = plane_size->surface_size.width;
4334 input.surface_size.height = plane_size->surface_size.height;
4335 input.swizzle_mode = tiling_info->gfx9.swizzle;
4337 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4338 input.scan = SCAN_DIRECTION_HORIZONTAL;
4339 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4340 input.scan = SCAN_DIRECTION_VERTICAL;
4342 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4345 if (!output.capable)
4348 if (dcc->independent_64b_blks == 0 &&
4349 output.grph.rgb.independent_64b_blks != 0)
4356 modifier_has_dcc(uint64_t modifier)
4358 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4362 modifier_gfx9_swizzle_mode(uint64_t modifier)
4364 if (modifier == DRM_FORMAT_MOD_LINEAR)
4367 return AMD_FMT_MOD_GET(TILE, modifier);
4370 static const struct drm_format_info *
4371 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4373 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4377 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4378 union dc_tiling_info *tiling_info,
4381 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4382 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4383 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4384 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4386 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4388 if (!IS_AMD_FMT_MOD(modifier))
4391 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4392 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4394 if (adev->family >= AMDGPU_FAMILY_NV) {
4395 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4397 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4399 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4403 enum dm_micro_swizzle {
4404 MICRO_SWIZZLE_Z = 0,
4405 MICRO_SWIZZLE_S = 1,
4406 MICRO_SWIZZLE_D = 2,
4410 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4414 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4415 const struct drm_format_info *info = drm_format_info(format);
4418 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4424 * We always have to allow these modifiers:
4425 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4426 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4428 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4429 modifier == DRM_FORMAT_MOD_INVALID) {
4433 /* Check that the modifier is on the list of the plane's supported modifiers. */
4434 for (i = 0; i < plane->modifier_count; i++) {
4435 if (modifier == plane->modifiers[i])
4438 if (i == plane->modifier_count)
4442 * For D swizzle the canonical modifier depends on the bpp, so check
4445 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4446 adev->family >= AMDGPU_FAMILY_NV) {
4447 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4451 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4455 if (modifier_has_dcc(modifier)) {
4456 /* Per radeonsi comments 16/64 bpp are more complicated. */
4457 if (info->cpp[0] != 4)
4459 /* We support multi-planar formats, but not when combined with
4460 * additional DCC metadata planes. */
4461 if (info->num_planes > 1)
4469 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4474 if (*cap - *size < 1) {
4475 uint64_t new_cap = *cap * 2;
4476 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4484 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4490 (*mods)[*size] = mod;
4495 add_gfx9_modifiers(const struct amdgpu_device *adev,
4496 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4498 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4499 int pipe_xor_bits = min(8, pipes +
4500 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4501 int bank_xor_bits = min(8 - pipe_xor_bits,
4502 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4503 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4504 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4507 if (adev->family == AMDGPU_FAMILY_RV) {
4508 /* Raven2 and later */
4509 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4512 * No _D DCC swizzles yet because we only allow 32bpp, which
4513 * doesn't support _D on DCN
4516 if (has_constant_encode) {
4517 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4518 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4519 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4520 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4521 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4522 AMD_FMT_MOD_SET(DCC, 1) |
4523 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4524 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4525 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4528 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4529 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4530 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4531 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4532 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4533 AMD_FMT_MOD_SET(DCC, 1) |
4534 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4535 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4536 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4538 if (has_constant_encode) {
4539 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4540 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4541 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4542 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4543 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4544 AMD_FMT_MOD_SET(DCC, 1) |
4545 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4546 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4547 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4549 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4550 AMD_FMT_MOD_SET(RB, rb) |
4551 AMD_FMT_MOD_SET(PIPE, pipes));
4554 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4555 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4556 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4557 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4558 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4559 AMD_FMT_MOD_SET(DCC, 1) |
4560 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4561 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4562 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4563 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4564 AMD_FMT_MOD_SET(RB, rb) |
4565 AMD_FMT_MOD_SET(PIPE, pipes));
4569 * Only supported for 64bpp on Raven, will be filtered on format in
4570 * dm_plane_format_mod_supported.
4572 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4573 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4574 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4575 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4576 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4578 if (adev->family == AMDGPU_FAMILY_RV) {
4579 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4580 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4581 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4582 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4583 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4587 * Only supported for 64bpp on Raven, will be filtered on format in
4588 * dm_plane_format_mod_supported.
4590 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4591 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4592 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4594 if (adev->family == AMDGPU_FAMILY_RV) {
4595 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4596 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4597 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4602 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4603 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4605 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4607 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4608 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4609 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4610 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4611 AMD_FMT_MOD_SET(DCC, 1) |
4612 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4613 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4614 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4616 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4617 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4618 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4619 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4620 AMD_FMT_MOD_SET(DCC, 1) |
4621 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4622 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4623 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4624 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4626 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4627 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4628 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4629 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4631 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4632 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4633 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4634 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4637 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4638 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4639 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4640 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4642 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4643 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4644 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/*
 * Build the framebuffer-modifier list for GFX10.3 (RB+) ASICs, most
 * preferred first: DCC with retile, DCC without retile, plain R_X/S_X
 * swizzles, then GFX9 64K_D/64K_S fallbacks for compatibility.
 * NOTE(review): some structural lines (braces/blank lines) are not
 * visible in this extracted view; code text is left unchanged.
 */
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
uint64_t **mods, uint64_t *size, uint64_t *capacity)
/* XOR bit counts are derived from the ASIC pipe/packer configuration. */
int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
/* DCC (no retile): independent 64B+128B blocks, 64B max compressed block. */
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
/* DCC with retile plane (displayable DCC for multi-pipe scanout). */
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
/* Uncompressed render swizzle. */
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
/* Uncompressed standard swizzle. */
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/*
 * Allocate and populate the modifier list advertised for a DRM plane.
 * Cursor planes only get LINEAR; otherwise the list is chosen per GPU
 * family. The list is terminated with DRM_FORMAT_MOD_INVALID.
 * NOTE(review): error-path/brace lines are not visible in this view.
 */
get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
uint64_t size = 0, capacity = 128;
/* We have not hooked up any pre-GFX9 modifiers. */
if (adev->family < AMDGPU_FAMILY_AI)
/* Initial capacity; add_modifier presumably grows it on demand — confirm. */
*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
return *mods ? 0 : -ENOMEM;
switch (adev->family) {
case AMDGPU_FAMILY_AI:
case AMDGPU_FAMILY_RV:
add_gfx9_modifiers(adev, mods, &size, &capacity);
case AMDGPU_FAMILY_NV:
case AMDGPU_FAMILY_VGH:
case AMDGPU_FAMILY_YC:
/* Sienna Cichlid and newer are GFX10.3 (RB+); older NV is GFX10.1. */
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
add_gfx10_3_modifiers(adev, mods, &size, &capacity);
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
/* Linear is always supported. */
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
/* INVALID marks the end of the list. */
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
/*
 * Translate a GFX9+ framebuffer modifier into DC tiling info, DCC
 * parameters and the DCC metadata address, then validate the DCC
 * configuration against the hardware.
 */
fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
union dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
const bool force_disable_dcc)
const uint64_t modifier = afb->base.modifier;
fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
if (modifier_has_dcc(modifier) && !force_disable_dcc) {
/* DCC metadata lives in FB plane 1 (offsets[1]/pitches[1]). */
uint64_t dcc_address = afb->address + afb->base.offsets[1];
dcc->meta_pitch = afb->base.pitches[1];
dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
/*
 * Fill DC plane size, tiling, DCC and address info from a DRM
 * framebuffer. RGB formats use a single surface; YUV formats get
 * separate luma/chroma planes. GFX9+ uses modifiers, older ASICs use
 * legacy tiling flags.
 */
fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
bool force_disable_dcc)
const struct drm_framebuffer *fb = &afb->base;
/* Start from a clean slate so stale fields never leak through. */
memset(tiling_info, 0, sizeof(*tiling_info));
memset(plane_size, 0, sizeof(*plane_size));
memset(dcc, 0, sizeof(*dcc));
memset(address, 0, sizeof(*address));
address->tmz_surface = tmz_surface;
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
/* Single-plane RGB surface. */
uint64_t addr = afb->address + fb->offsets[0];
plane_size->surface_size.x = 0;
plane_size->surface_size.y = 0;
plane_size->surface_size.width = fb->width;
plane_size->surface_size.height = fb->height;
/* Pitch is stored in pixels, DRM stores it in bytes. */
plane_size->surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
address->type = PLN_ADDR_TYPE_GRAPHICS;
address->grph.addr.low_part = lower_32_bits(addr);
address->grph.addr.high_part = upper_32_bits(addr);
} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
/* Two-plane YUV: plane 0 = luma, plane 1 = chroma. */
uint64_t luma_addr = afb->address + fb->offsets[0];
uint64_t chroma_addr = afb->address + fb->offsets[1];
plane_size->surface_size.x = 0;
plane_size->surface_size.y = 0;
plane_size->surface_size.width = fb->width;
plane_size->surface_size.height = fb->height;
plane_size->surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
plane_size->chroma_size.x = 0;
plane_size->chroma_size.y = 0;
/* TODO: set these based on surface format */
plane_size->chroma_size.width = fb->width / 2;
plane_size->chroma_size.height = fb->height / 2;
plane_size->chroma_pitch =
fb->pitches[1] / fb->format->cpp[1];
address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
address->video_progressive.luma_addr.low_part =
lower_32_bits(luma_addr);
address->video_progressive.luma_addr.high_part =
upper_32_bits(luma_addr);
address->video_progressive.chroma_addr.low_part =
lower_32_bits(chroma_addr);
address->video_progressive.chroma_addr.high_part =
upper_32_bits(chroma_addr);
/* GFX9+ derives tiling from modifiers; GFX8 and older from flags. */
if (adev->family >= AMDGPU_FAMILY_AI) {
ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
/*
 * Derive DC blending state from a DRM plane state: per-pixel alpha
 * (only for overlay planes with premultiplied-alpha formats) and
 * global (plane-wide) alpha scaled from DRM's 16-bit value to 8 bits.
 */
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *global_alpha,
int *global_alpha_value)
/* Defaults: opaque, no blending. */
*per_pixel_alpha = false;
*global_alpha = false;
*global_alpha_value = 0xff;
if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
static const uint32_t alpha_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_ABGR8888,
uint32_t format = plane_state->fb->format->format;
for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
if (format == alpha_formats[i]) {
*per_pixel_alpha = true;
/* 0xffff is fully opaque; anything less enables global alpha. */
if (plane_state->alpha < 0xffff) {
*global_alpha = true;
/* DRM alpha is 16-bit, DC takes 8-bit: keep the high byte. */
*global_alpha_value = plane_state->alpha >> 8;
/*
 * Map DRM color encoding/range properties to a DC color space.
 * RGB formats are always sRGB; YUV formats pick BT.601/709/2020 with
 * full or limited range per the plane's properties.
 */
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
const enum surface_pixel_format format,
enum dc_color_space *color_space)
*color_space = COLOR_SPACE_SRGB;
/* DRM color properties only affect non-RGB formats. */
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
switch (plane_state->color_encoding) {
case DRM_COLOR_YCBCR_BT601:
*color_space = COLOR_SPACE_YCBCR601;
*color_space = COLOR_SPACE_YCBCR601_LIMITED;
case DRM_COLOR_YCBCR_BT709:
*color_space = COLOR_SPACE_YCBCR709;
*color_space = COLOR_SPACE_YCBCR709_LIMITED;
case DRM_COLOR_YCBCR_BT2020:
/* NOTE(review): no limited-range 2020 variant visible here — confirm. */
*color_space = COLOR_SPACE_2020_YCBCR;
/*
 * Populate a dc_plane_info and the plane address from a DRM plane
 * state: pixel format, rotation, buffer attributes (tiling/DCC/size)
 * and blending. Unknown fourccs are rejected with a debug message.
 */
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state,
const uint64_t tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
bool force_disable_dcc)
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
memset(plane_info, 0, sizeof(*plane_info));
/* Map the DRM fourcc onto DC's surface pixel format enum. */
switch (fb->format->format) {
plane_info->format =
SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
case DRM_FORMAT_RGB565:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
case DRM_FORMAT_NV21:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
case DRM_FORMAT_NV12:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
case DRM_FORMAT_P010:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
case DRM_FORMAT_XRGB16161616:
case DRM_FORMAT_ARGB16161616:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
case DRM_FORMAT_XBGR16161616:
case DRM_FORMAT_ABGR16161616:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
"Unsupported screen format %p4cc\n",
&fb->format->format);
/* Translate DRM rotation bits; unsupported values fall back to 0. */
switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_0:
plane_info->rotation = ROTATION_ANGLE_0;
case DRM_MODE_ROTATE_90:
plane_info->rotation = ROTATION_ANGLE_90;
case DRM_MODE_ROTATE_180:
plane_info->rotation = ROTATION_ANGLE_180;
case DRM_MODE_ROTATE_270:
plane_info->rotation = ROTATION_ANGLE_270;
plane_info->rotation = ROTATION_ANGLE_0;
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
plane_info->layer_index = 0;
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
&plane_info->dcc, address, tmz_surface,
fill_blending_from_plane_state(
plane_state, &plane_info->per_pixel_alpha,
&plane_info->global_alpha, &plane_info->global_alpha_value);
5055 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5056 struct dc_plane_state *dc_plane_state,
5057 struct drm_plane_state *plane_state,
5058 struct drm_crtc_state *crtc_state)
5060 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5061 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5062 struct dc_scaling_info scaling_info;
5063 struct dc_plane_info plane_info;
5065 bool force_disable_dcc = false;
5067 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5071 dc_plane_state->src_rect = scaling_info.src_rect;
5072 dc_plane_state->dst_rect = scaling_info.dst_rect;
5073 dc_plane_state->clip_rect = scaling_info.clip_rect;
5074 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5076 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5077 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5080 &dc_plane_state->address,
5086 dc_plane_state->format = plane_info.format;
5087 dc_plane_state->color_space = plane_info.color_space;
5088 dc_plane_state->format = plane_info.format;
5089 dc_plane_state->plane_size = plane_info.plane_size;
5090 dc_plane_state->rotation = plane_info.rotation;
5091 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5092 dc_plane_state->stereo_format = plane_info.stereo_format;
5093 dc_plane_state->tiling_info = plane_info.tiling_info;
5094 dc_plane_state->visible = plane_info.visible;
5095 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5096 dc_plane_state->global_alpha = plane_info.global_alpha;
5097 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5098 dc_plane_state->dcc = plane_info.dcc;
5099 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5100 dc_plane_state->flip_int_enabled = true;
5103 * Always set input transfer function, since plane state is refreshed
5106 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
/*
 * Compute the stream's src (viewport) and dst (addressable area)
 * rectangles from the requested mode and the connector's scaling
 * property (full / aspect / center), applying underscan borders.
 */
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
const struct dm_connector_state *dm_state,
struct dc_stream_state *stream)
enum amdgpu_rmx_type rmx_type;
struct rect src = { 0 }; /* viewport in composition space*/
struct rect dst = { 0 }; /* stream addressable area */
/* no mode. nothing to be done */
/* Full screen scaling by default */
src.width = mode->hdisplay;
src.height = mode->vdisplay;
dst.width = stream->timing.h_addressable;
dst.height = stream->timing.v_addressable;
rmx_type = dm_state->scaling;
if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
/* Preserve aspect ratio: shrink the dimension that needs it. */
if (src.width * dst.height <
src.height * dst.width) {
/* height needs less upscaling/more downscaling */
dst.width = src.width *
dst.height / src.height;
/* width needs less upscaling/more downscaling */
dst.height = src.height *
dst.width / src.width;
} else if (rmx_type == RMX_CENTER) {
/* Center the (possibly smaller) destination in the timing. */
dst.x = (stream->timing.h_addressable - dst.width) / 2;
dst.y = (stream->timing.v_addressable - dst.height) / 2;
if (dm_state->underscan_enable) {
dst.x += dm_state->underscan_hborder / 2;
dst.y += dm_state->underscan_vborder / 2;
dst.width -= dm_state->underscan_hborder;
dst.height -= dm_state->underscan_vborder;
DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
dst.x, dst.y, dst.width, dst.height);
/*
 * Pick a DC color depth for a connector: start from EDID bpc (YCbCr420
 * deep-color caps when is_y420), cap by the user-requested max bpc,
 * and round down to an even value before mapping to the enum.
 */
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
bool is_y420, int requested_bpc)
/* Cap display bpc based on HDMI 2.0 HF-VSDB */
if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
bpc = (uint8_t)connector->display_info.bpc;
/* Assume 8 bpc by default if no bpc is specified. */
bpc = bpc ? bpc : 8;
if (requested_bpc > 0) {
/*
* Cap display bpc based on the user requested value.
*
* The value for state->max_bpc may not correctly updated
* depending on when the connector gets added to the state
* or if this was called outside of atomic check, so it
* can't be used directly.
*/
bpc = min_t(u8, bpc, requested_bpc);
/* Round down to the nearest even number. */
bpc = bpc - (bpc & 1);
/*
* Temporary Work around, DRM doesn't parse color depth for
* EDID revision before 1.4
* TODO: Fix edid parsing
*/
return COLOR_DEPTH_888;
return COLOR_DEPTH_666;
return COLOR_DEPTH_888;
return COLOR_DEPTH_101010;
return COLOR_DEPTH_121212;
return COLOR_DEPTH_141414;
return COLOR_DEPTH_161616;
return COLOR_DEPTH_UNDEFINED;
/* Convert DRM's picture aspect ratio to DC's enum by direct cast. */
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
/* 1-1 mapping, since both enums follow the HDMI spec. */
return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
/*
 * Choose the stream output color space from the pixel encoding:
 * RGB -> sRGB; YCbCr -> BT.709 above the HD pixel-clock threshold,
 * BT.601 below it (limited range for Y-only signals).
 */
static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
enum dc_color_space color_space = COLOR_SPACE_SRGB;
switch (dc_crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
case PIXEL_ENCODING_YCBCR444:
case PIXEL_ENCODING_YCBCR420:
/*
* 27030khz is the separation point between HDTV and SDTV
* according to HDMI spec, we use YCbCr709 and YCbCr601
* respectively
*/
if (dc_crtc_timing->pix_clk_100hz > 270300) {
if (dc_crtc_timing->flags.Y_ONLY)
COLOR_SPACE_YCBCR709_LIMITED;
color_space = COLOR_SPACE_YCBCR709;
if (dc_crtc_timing->flags.Y_ONLY)
COLOR_SPACE_YCBCR601_LIMITED;
color_space = COLOR_SPACE_YCBCR601;
case PIXEL_ENCODING_RGB:
color_space = COLOR_SPACE_SRGB;
/*
 * Reduce the timing's color depth until the resulting TMDS clock fits
 * the sink's max_tmds_clock. Returns whether a valid depth was found
 * (false if even 6 bpc does not fit).
 */
static bool adjust_colour_depth_from_display_info(
struct dc_crtc_timing *timing_out,
const struct drm_display_info *info)
enum dc_color_depth depth = timing_out->display_color_depth;
/* pix_clk_100hz is in 100 Hz units; normalize to kHz. */
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
normalized_clk /= 2;
/* Adjusting pix clock following on HDMI spec based on colour depth */
case COLOR_DEPTH_888:
case COLOR_DEPTH_101010:
normalized_clk = (normalized_clk * 30) / 24;
case COLOR_DEPTH_121212:
normalized_clk = (normalized_clk * 36) / 24;
case COLOR_DEPTH_161616:
normalized_clk = (normalized_clk * 48) / 24;
/* The above depths are the only ones valid for HDMI. */
if (normalized_clk <= info->max_tmds_clock) {
timing_out->display_color_depth = depth;
/* Try the next lower depth; give up below 6 bpc. */
} while (--depth > COLOR_DEPTH_666);
/*
 * Translate a drm_display_mode (plus connector info) into the DC
 * stream timing: pixel encoding, color depth, polarity flags, VIC,
 * h/v timing, aspect ratio and output color space. FreeSync video
 * modes use the base (non-crtc_) timing fields.
 */
static void fill_stream_properties_from_drm_display_mode(
struct dc_stream_state *stream,
const struct drm_display_mode *mode_in,
const struct drm_connector *connector,
const struct drm_connector_state *connector_state,
const struct dc_stream_state *old_stream,
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct hdmi_vendor_infoframe hv_frame;
struct hdmi_avi_infoframe avi_frame;
memset(&hv_frame, 0, sizeof(hv_frame));
memset(&avi_frame, 0, sizeof(avi_frame));
timing_out->h_border_left = 0;
timing_out->h_border_right = 0;
timing_out->v_border_top = 0;
timing_out->v_border_bottom = 0;
/* TODO: un-hardcode */
if (drm_mode_is_420_only(info, mode_in)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if (drm_mode_is_420_also(info, mode_in)
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
timing_out->display_color_depth = convert_color_depth_from_display_info(
(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
/* Reuse VIC and sync polarity from the old stream when present. */
timing_out->vic = old_stream->timing.vic;
timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
timing_out->vic = drm_match_cea_mode(mode_in);
if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
/* Prefer the VICs derived from the AVI/vendor infoframes on HDMI. */
drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
timing_out->vic = avi_frame.video_code;
drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
timing_out->hdmi_vic = hv_frame.vic;
if (is_freesync_video_mode(mode_in, aconnector)) {
/* FreeSync video: use the unadjusted mode timings. */
timing_out->h_addressable = mode_in->hdisplay;
timing_out->h_total = mode_in->htotal;
timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
timing_out->v_total = mode_in->vtotal;
timing_out->v_addressable = mode_in->vdisplay;
timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
timing_out->pix_clk_100hz = mode_in->clock * 10;
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
timing_out->v_total = mode_in->crtc_vtotal;
timing_out->v_addressable = mode_in->crtc_vdisplay;
timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
stream->output_color_space = get_output_color_space(timing_out);
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
/* If the depth doesn't fit the TMDS clock, retry as YCbCr 4:2:0. */
if (!adjust_colour_depth_from_display_info(timing_out, info) &&
drm_mode_is_420_also(info, mode_in) &&
timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
adjust_colour_depth_from_display_info(timing_out, info);
/*
 * Copy audio capabilities parsed from the sink's EDID into DC's
 * audio_info: manufacturer/product IDs, display name, CEA short audio
 * descriptors (rev >= 3 only), speaker allocation and latencies.
 */
static void fill_audio_info(struct audio_info *audio_info,
const struct drm_connector *drm_connector,
const struct dc_sink *dc_sink)
int cea_revision = 0;
const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
audio_info->manufacture_id = edid_caps->manufacturer_id;
audio_info->product_id = edid_caps->product_id;
cea_revision = drm_connector->display_info.cea_rev;
strscpy(audio_info->display_name,
edid_caps->display_name,
AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
if (cea_revision >= 3) {
audio_info->mode_count = edid_caps->audio_mode_count;
for (i = 0; i < audio_info->mode_count; ++i) {
audio_info->modes[i].format_code =
(enum audio_format_code)
(edid_caps->audio_modes[i].format_code);
audio_info->modes[i].channel_count =
edid_caps->audio_modes[i].channel_count;
audio_info->modes[i].sample_rates.all =
edid_caps->audio_modes[i].sample_rate;
audio_info->modes[i].sample_size =
edid_caps->audio_modes[i].sample_size;
audio_info->flags.all = edid_caps->speaker_flags;
/* TODO: We only check for the progressive mode, check for interlace mode too */
if (drm_connector->latency_present[0]) {
audio_info->video_latency = drm_connector->video_latency[0];
audio_info->audio_latency = drm_connector->audio_latency[0];
/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
/*
 * Copy only the hardware (crtc_*) timing fields from src to dst,
 * leaving the user-visible mode fields of dst untouched.
 */
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
struct drm_display_mode *dst_mode)
dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
dst_mode->crtc_clock = src_mode->crtc_clock;
dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
dst_mode->crtc_htotal = src_mode->crtc_htotal;
dst_mode->crtc_hskew = src_mode->crtc_hskew;
dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
/*
 * Patch drm_mode's crtc_* timing with the panel's native timing when
 * scaling is enabled, or when the requested mode already matches the
 * native clock/totals; otherwise leave it alone.
 */
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
const struct drm_display_mode *native_mode,
if (scale_enabled) {
copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
} else if (native_mode->clock == drm_mode->clock &&
native_mode->htotal == drm_mode->htotal &&
native_mode->vtotal == drm_mode->vtotal) {
copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
/* no scaling nor amdgpu inserted, no need to patch */
/*
 * Create a virtual dc_sink on the connector's link, used when no real
 * sink is attached (e.g. headless/forced modes). Returns NULL on
 * allocation failure.
 */
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
sink_init_data.link = aconnector->dc_link;
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
sink = dc_sink_create(&sink_init_data);
DRM_ERROR("Failed to create sink!\n");
/* Mark it virtual so DC treats it as a fake sink. */
sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
/*
 * Program a stream's CRTC-reset trigger from its multisync master:
 * fire on the master's VSYNC edge (rising for positive polarity),
 * delayed to the next pixel.
 */
static void set_multisync_trigger_params(
struct dc_stream_state *stream)
struct dc_stream_state *master = NULL;
if (stream->triggered_crtc_reset.enabled) {
master = stream->triggered_crtc_reset.event_source;
stream->triggered_crtc_reset.event =
master->timing.flags.VSYNC_POSITIVE_POLARITY ?
CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
/*
 * Pick the multisync master: the enabled stream with the highest
 * refresh rate (pix_clk / (h_total * v_total)), then point every other
 * stream's crtc-reset event source at it.
 */
static void set_master_stream(struct dc_stream_state *stream_set[],
int j, highest_rfr = 0, master_stream = 0;
for (j = 0; j < stream_count; j++) {
if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
int refresh_rate = 0;
/* pix_clk_100hz * 100 = Hz; divide by total pixels per frame. */
refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
if (refresh_rate > highest_rfr) {
highest_rfr = refresh_rate;
for (j = 0; j < stream_count; j++) {
stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
/*
 * Enable per-frame CRTC synchronization across all streams in the DC
 * state (no-op with fewer than two streams): choose a master, then
 * program each stream's trigger parameters.
 */
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
struct dc_stream_state *stream;
if (context->stream_count < 2)
for (i = 0; i < context->stream_count ; i++) {
if (!context->streams[i])
/*
* TODO: add a function to read AMD VSDB bits and set
* crtc_sync_master.multi_sync_enabled flag
* For now it's set to false
*/
set_master_stream(context->streams, context->stream_count);
for (i = 0; i < context->stream_count ; i++) {
stream = context->streams[i];
set_multisync_trigger_params(stream);
5580 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Reset the stream's DSC flag and, for DP SST sinks, parse the sink's
 * DSC DPCD capabilities (basic + branch decoder) into dsc_caps.
 */
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
stream->timing.flags.DSC = 0;
if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
/*
 * Decide whether DSC is used for this stream: compute a DSC config
 * against the link bandwidth for DP SST, honor the debugfs force
 * setting, and apply any debugfs slice/bpp overrides.
 */
static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
link_bandwidth_kbps,
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
/* Overwrite the stream flag if DSC is enabled through debugfs */
if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
stream->timing.flags.DSC = 1;
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5638 * DOC: FreeSync Video
5640 * When a userspace application wants to play a video, the content follows a
5641 * standard format definition that usually specifies the FPS for that format.
5642 * The below list illustrates some video format and the expected FPS,
5645 * - TV/NTSC (23.976 FPS)
5648 * - TV/NTSC (29.97 FPS)
5649 * - TV/NTSC (30 FPS)
5650 * - Cinema HFR (48 FPS)
5652 * - Commonly used (60 FPS)
5653 * - Multiples of 24 (48,72,96 FPS)
5655 * The list of standards video format is not huge and can be added to the
5656 * connector modeset list beforehand. With that, userspace can leverage
* FreeSync to extend the front porch in order to attain the target refresh
5658 * rate. Such a switch will happen seamlessly, without screen blanking or
5659 * reprogramming of the output in any other way. If the userspace requests a
5660 * modesetting change compatible with FreeSync modes that only differ in the
5661 * refresh rate, DC will skip the full update and avoid blink during the
5662 * transition. For example, the video player can change the modesetting from
5663 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5664 * causing any display blink. This same concept can be applied to a mode
/*
 * Find (and cache in aconnector->freesync_vid_base) the mode that will
 * serve as the FreeSync video base: the preferred mode's resolution at
 * its highest available refresh rate. Falls back to the first listed
 * mode when the EDID has no preferred mode.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
bool use_probed_modes)
struct drm_display_mode *m, *m_pref = NULL;
u16 current_refresh, highest_refresh;
struct list_head *list_head = use_probed_modes ?
&aconnector->base.probed_modes :
&aconnector->base.modes;
/* Cached result from a previous call (clock != 0 means valid). */
if (aconnector->freesync_vid_base.clock != 0)
return &aconnector->freesync_vid_base;
/* Find the preferred mode */
list_for_each_entry (m, list_head, head) {
if (m->type & DRM_MODE_TYPE_PREFERRED) {
/* Probably an EDID with no preferred mode. Fallback to first entry */
m_pref = list_first_entry_or_null(
&aconnector->base.modes, struct drm_display_mode, head);
DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
highest_refresh = drm_mode_vrefresh(m_pref);
/*
* Find the mode with highest refresh rate with same resolution.
* For some monitors, preferred mode is not the mode with highest
* supported refresh rate.
*/
list_for_each_entry (m, list_head, head) {
current_refresh = drm_mode_vrefresh(m);
if (m->hdisplay == m_pref->hdisplay &&
m->vdisplay == m_pref->vdisplay &&
highest_refresh < current_refresh) {
highest_refresh = current_refresh;
aconnector->freesync_vid_base = *m_pref;
/*
 * Check whether @mode is a "FreeSync video mode": identical to the
 * connector's highest-refresh base mode in every timing field except
 * vtotal/vsync position, i.e. a mode derived from the base mode purely
 * by stretching the vertical front porch.
 */
5720 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5721 struct amdgpu_dm_connector *aconnector)
5723 struct drm_display_mode *high_mode;
5726 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5727 if (!high_mode || !mode)
/* Both vsync_start and vsync_end must shift by exactly this delta. */
5730 timing_diff = high_mode->vtotal - mode->vtotal;
/* Any mismatch outside the front-porch adjustment disqualifies it. */
5732 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5733 high_mode->hdisplay != mode->hdisplay ||
5734 high_mode->vdisplay != mode->vdisplay ||
5735 high_mode->hsync_start != mode->hsync_start ||
5736 high_mode->hsync_end != mode->hsync_end ||
5737 high_mode->htotal != mode->htotal ||
5738 high_mode->hskew != mode->hskew ||
5739 high_mode->vscan != mode->vscan ||
5740 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5741 high_mode->vsync_end - mode->vsync_end != timing_diff)
/*
 * Build a dc_stream_state for @aconnector displaying @drm_mode.
 *
 * Picks the connector's preferred mode, optionally recalculates timings
 * for FreeSync video modes, fills stream properties (reusing polarity/vic
 * from @old_stream when scaling is on and the refresh rate is unchanged),
 * applies SST DSC policy, and sets up PSR/VSC infopackets where supported.
 * Returns NULL on failure. The returned stream holds its own sink
 * reference; the local sink reference is dropped before returning.
 */
5747 static struct dc_stream_state *
5748 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5749 const struct drm_display_mode *drm_mode,
5750 const struct dm_connector_state *dm_state,
5751 const struct dc_stream_state *old_stream,
5754 struct drm_display_mode *preferred_mode = NULL;
5755 struct drm_connector *drm_connector;
5756 const struct drm_connector_state *con_state =
5757 dm_state ? &dm_state->base : NULL;
5758 struct dc_stream_state *stream = NULL;
5759 struct drm_display_mode mode = *drm_mode;
5760 struct drm_display_mode saved_mode;
5761 struct drm_display_mode *freesync_mode = NULL;
5762 bool native_mode_found = false;
5763 bool recalculate_timing = false;
5764 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5766 int preferred_refresh = 0;
5767 #if defined(CONFIG_DRM_AMD_DC_DCN)
5768 struct dsc_dec_dpcd_caps dsc_caps;
5770 struct dc_sink *sink = NULL;
5772 memset(&saved_mode, 0, sizeof(saved_mode));
5774 if (aconnector == NULL) {
5775 DRM_ERROR("aconnector is NULL!\n");
5779 drm_connector = &aconnector->base;
/* No real sink attached (e.g. headless/forced) — use a fake one. */
5781 if (!aconnector->dc_sink) {
5782 sink = create_fake_sink(aconnector);
5786 sink = aconnector->dc_sink;
5787 dc_sink_retain(sink);
5790 stream = dc_create_stream_for_sink(sink);
5792 if (stream == NULL) {
5793 DRM_ERROR("Failed to create stream for sink!\n");
5797 stream->dm_stream_context = aconnector;
5799 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5800 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5802 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5803 /* Search for preferred mode */
5804 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5805 native_mode_found = true;
/* No DRM_MODE_TYPE_PREFERRED flag found — fall back to first mode. */
5809 if (!native_mode_found)
5810 preferred_mode = list_first_entry_or_null(
5811 &aconnector->base.modes,
5812 struct drm_display_mode,
5815 mode_refresh = drm_mode_vrefresh(&mode);
5817 if (preferred_mode == NULL) {
5819 * This may not be an error, the use case is when we have no
5820 * usermode calls to reset and set mode upon hotplug. In this
5821 * case, we call set mode ourselves to restore the previous mode
5822 * and the modelist may not be filled in in time.
5824 DRM_DEBUG_DRIVER("No preferred mode found\n");
/* FreeSync video mode: reuse the base mode's timings seamlessly. */
5826 recalculate_timing = amdgpu_freesync_vid_mode &&
5827 is_freesync_video_mode(&mode, aconnector);
5828 if (recalculate_timing) {
5829 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5831 mode = *freesync_mode;
5833 decide_crtc_timing_for_drm_display_mode(
5834 &mode, preferred_mode, scale);
5836 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5840 if (recalculate_timing)
5841 drm_mode_set_crtcinfo(&saved_mode, 0);
5843 drm_mode_set_crtcinfo(&mode, 0);
5846 * If scaling is enabled and refresh rate didn't change
5847 * we copy the vic and polarities of the old timings
5849 if (!scale || mode_refresh != preferred_refresh)
5850 fill_stream_properties_from_drm_display_mode(
5851 stream, &mode, &aconnector->base, con_state, NULL,
5854 fill_stream_properties_from_drm_display_mode(
5855 stream, &mode, &aconnector->base, con_state, old_stream,
5858 #if defined(CONFIG_DRM_AMD_DC_DCN)
5859 /* SST DSC determination policy */
5860 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5861 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5862 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5865 update_stream_scaling_settings(&mode, dm_state, stream);
5868 &stream->audio_info,
5872 update_stream_signal(stream, sink);
5874 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5875 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5877 if (stream->link->psr_settings.psr_feature_enabled) {
5879 // should decide stream support vsc sdp colorimetry capability
5880 // before building vsc info packet
5882 stream->use_vsc_sdp_for_colorimetry = false;
5883 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5884 stream->use_vsc_sdp_for_colorimetry =
5885 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5887 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5888 stream->use_vsc_sdp_for_colorimetry = true;
5890 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
/* Delay PSR entry after a modeset; counted down elsewhere. */
5891 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
/* Drop the local sink reference; the stream keeps its own. */
5895 dc_sink_release(sink);
/* drm_crtc_funcs.destroy: release the DRM core bookkeeping for this CRTC. */
5900 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5902 drm_crtc_cleanup(crtc);
/*
 * drm_crtc_funcs.atomic_destroy_state: drop the dc_stream reference held
 * by the DM CRTC state, then free the base atomic state.
 */
5906 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5907 struct drm_crtc_state *state)
5909 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5911 /* TODO: destroy dc_stream objects once the stream object is flattened */
5913 dc_stream_release(cur->stream);
5916 __drm_atomic_helper_crtc_destroy_state(state);
/*
 * drm_crtc_funcs.reset: tear down any existing state and install a fresh
 * zeroed dm_crtc_state as the CRTC's current atomic state.
 */
5922 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5924 struct dm_crtc_state *state;
5927 dm_crtc_destroy_state(crtc, crtc->state);
5929 state = kzalloc(sizeof(*state), GFP_KERNEL);
5930 if (WARN_ON(!state))
5933 __drm_atomic_helper_crtc_reset(crtc, &state->base);
/*
 * drm_crtc_funcs.atomic_duplicate_state: copy the DM-private CRTC state,
 * taking an extra reference on the dc_stream so both copies own it.
 */
5936 static struct drm_crtc_state *
5937 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5939 struct dm_crtc_state *state, *cur;
5941 cur = to_dm_crtc_state(crtc->state);
5943 if (WARN_ON(!crtc->state))
5946 state = kzalloc(sizeof(*state), GFP_KERNEL);
5950 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5953 state->stream = cur->stream;
5954 dc_stream_retain(state->stream);
/* Carry over the DM-specific fields the helpers don't know about. */
5957 state->active_planes = cur->active_planes;
5958 state->vrr_infopacket = cur->vrr_infopacket;
5959 state->abm_level = cur->abm_level;
5960 state->vrr_supported = cur->vrr_supported;
5961 state->freesync_config = cur->freesync_config;
5962 state->cm_has_degamma = cur->cm_has_degamma;
5963 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5964 /* TODO: duplicate dc_stream after the stream object is flattened */
5966 return &state->base;
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
/* drm_crtc_funcs.late_register: create per-CRTC debugfs entries. */
5970 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5972 crtc_debugfs_init(crtc);
/*
 * Enable or disable the VUPDATE interrupt for @crtc's OTG instance.
 * Returns 0 on success, -EBUSY if DC rejected the interrupt change.
 */
5978 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5980 enum dc_irq_source irq_source;
5981 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5982 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
/* IRQ source is indexed by the CRTC's OTG instance. */
5985 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5987 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5989 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5990 acrtc->crtc_id, enable ? "en" : "dis", rc);
/*
 * Enable or disable vblank interrupts for @crtc. In VRR mode the VUPDATE
 * interrupt is toggled alongside vblank. On DCN (outside GPU reset) the
 * change is additionally queued to the vblank control workqueue, which
 * takes a stream reference that the worker is expected to release.
 */
5994 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5996 enum dc_irq_source irq_source;
5997 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5998 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5999 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6000 #if defined(CONFIG_DRM_AMD_DC_DCN)
6001 struct amdgpu_display_manager *dm = &adev->dm;
6002 struct vblank_control_work *work;
6007 /* vblank irq on -> Only need vupdate irq in vrr mode */
6008 if (amdgpu_dm_vrr_active(acrtc_state))
6009 rc = dm_set_vupdate_irq(crtc, true);
6011 /* vblank irq off -> vupdate irq off */
6012 rc = dm_set_vupdate_irq(crtc, false);
6018 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6020 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
/* Skip the workqueue path while a GPU reset is in progress. */
6023 if (amdgpu_in_reset(adev))
6026 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* GFP_ATOMIC: this can run in a context that must not sleep. */
6027 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6031 INIT_WORK(&work->work, vblank_control_worker);
6033 work->acrtc = acrtc;
6034 work->enable = enable;
6036 if (acrtc_state->stream) {
6037 dc_stream_retain(acrtc_state->stream);
6038 work->stream = acrtc_state->stream;
6041 queue_work(dm->vblank_control_workqueue, &work->work);
/* drm_crtc_funcs.enable_vblank: thin wrapper around dm_set_vblank(). */
6047 static int dm_enable_vblank(struct drm_crtc *crtc)
6049 return dm_set_vblank(crtc, true);
/* drm_crtc_funcs.disable_vblank: thin wrapper around dm_set_vblank(). */
6052 static void dm_disable_vblank(struct drm_crtc *crtc)
6054 dm_set_vblank(crtc, false);
6057 /* Implemented only the options currently available for the driver */
6058 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6059 .reset = dm_crtc_reset_state,
6060 .destroy = amdgpu_dm_crtc_destroy,
6061 .set_config = drm_atomic_helper_set_config,
6062 .page_flip = drm_atomic_helper_page_flip,
6063 .atomic_duplicate_state = dm_crtc_duplicate_state,
6064 .atomic_destroy_state = dm_crtc_destroy_state,
6065 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6066 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6067 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6068 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6069 .enable_vblank = dm_enable_vblank,
6070 .disable_vblank = dm_disable_vblank,
6071 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6072 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6073 .late_register = amdgpu_dm_crtc_late_register,
/*
 * drm_connector_funcs.detect: report connected when a dc_sink exists
 * (unless the connector state is forced, in which case the forced value
 * wins). MST work must not happen here — see the note below.
 */
6077 static enum drm_connector_status
6078 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6081 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6085 * 1. This interface is NOT called in context of HPD irq.
6086 * 2. This interface *is called* in context of user-mode ioctl. Which
6087 * makes it a bad place for *any* MST-related activity.
6090 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6091 !aconnector->fake_enable)
6092 connected = (aconnector->dc_sink != NULL);
6094 connected = (aconnector->base.force == DRM_FORCE_ON);
6096 update_subconnector_property(aconnector);
6098 return (connected ? connector_status_connected :
6099 connector_status_disconnected);
/*
 * drm_connector_funcs.atomic_set_property: store a driver-private
 * connector property (scaling mode, underscan enable/borders, ABM level)
 * into the DM connector state being built for this commit.
 */
6102 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6103 struct drm_connector_state *connector_state,
6104 struct drm_property *property,
6107 struct drm_device *dev = connector->dev;
6108 struct amdgpu_device *adev = drm_to_adev(dev);
6109 struct dm_connector_state *dm_old_state =
6110 to_dm_connector_state(connector->state);
6111 struct dm_connector_state *dm_new_state =
6112 to_dm_connector_state(connector_state);
6116 if (property == dev->mode_config.scaling_mode_property) {
6117 enum amdgpu_rmx_type rmx_type;
/* Map the DRM scaling-mode enum onto amdgpu's RMX type. */
6120 case DRM_MODE_SCALE_CENTER:
6121 rmx_type = RMX_CENTER;
6123 case DRM_MODE_SCALE_ASPECT:
6124 rmx_type = RMX_ASPECT;
6126 case DRM_MODE_SCALE_FULLSCREEN:
6127 rmx_type = RMX_FULL;
6129 case DRM_MODE_SCALE_NONE:
/* No change requested — nothing to update. */
6135 if (dm_old_state->scaling == rmx_type)
6138 dm_new_state->scaling = rmx_type;
6140 } else if (property == adev->mode_info.underscan_hborder_property) {
6141 dm_new_state->underscan_hborder = val;
6143 } else if (property == adev->mode_info.underscan_vborder_property) {
6144 dm_new_state->underscan_vborder = val;
6146 } else if (property == adev->mode_info.underscan_property) {
6147 dm_new_state->underscan_enable = val;
6149 } else if (property == adev->mode_info.abm_level_property) {
6150 dm_new_state->abm_level = val;
/*
 * drm_connector_funcs.atomic_get_property: read back a driver-private
 * connector property from the DM connector state. Mirror image of
 * amdgpu_dm_connector_atomic_set_property().
 */
6157 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6158 const struct drm_connector_state *state,
6159 struct drm_property *property,
6162 struct drm_device *dev = connector->dev;
6163 struct amdgpu_device *adev = drm_to_adev(dev);
6164 struct dm_connector_state *dm_state =
6165 to_dm_connector_state(state);
6168 if (property == dev->mode_config.scaling_mode_property) {
6169 switch (dm_state->scaling) {
6171 *val = DRM_MODE_SCALE_CENTER;
6174 *val = DRM_MODE_SCALE_ASPECT;
6177 *val = DRM_MODE_SCALE_FULLSCREEN;
6181 *val = DRM_MODE_SCALE_NONE;
6185 } else if (property == adev->mode_info.underscan_hborder_property) {
6186 *val = dm_state->underscan_hborder;
6188 } else if (property == adev->mode_info.underscan_vborder_property) {
6189 *val = dm_state->underscan_vborder;
6191 } else if (property == adev->mode_info.underscan_property) {
6192 *val = dm_state->underscan_enable;
6194 } else if (property == adev->mode_info.abm_level_property) {
6195 *val = dm_state->abm_level;
/* drm_connector_funcs.early_unregister: tear down the DP AUX channel. */
6202 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6204 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6206 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
/*
 * drm_connector_funcs.destroy: release everything the DM connector owns —
 * MST topology manager, eDP backlight device, emulated and real dc_sinks,
 * DP CEC registration, the i2c adapter, and the AUX channel name.
 */
6209 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6211 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6212 const struct dc_link *link = aconnector->dc_link;
6213 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6214 struct amdgpu_display_manager *dm = &adev->dm;
6218 * Call only if mst_mgr was initialized before since it's not done
6219 * for all connector types.
6221 if (aconnector->mst_mgr.dev)
6222 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6224 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6225 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* Unregister the backlight device tied to this eDP link, if any. */
6226 for (i = 0; i < dm->num_of_edps; i++) {
6227 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6228 backlight_device_unregister(dm->backlight_dev[i]);
6229 dm->backlight_dev[i] = NULL;
6234 if (aconnector->dc_em_sink)
6235 dc_sink_release(aconnector->dc_em_sink);
6236 aconnector->dc_em_sink = NULL;
6237 if (aconnector->dc_sink)
6238 dc_sink_release(aconnector->dc_sink);
6239 aconnector->dc_sink = NULL;
6241 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6242 drm_connector_unregister(connector);
6243 drm_connector_cleanup(connector);
6244 if (aconnector->i2c) {
6245 i2c_del_adapter(&aconnector->i2c->base);
6246 kfree(aconnector->i2c);
6248 kfree(aconnector->dm_dp_aux.aux.name);
/*
 * drm_connector_funcs.reset: free the existing connector state and install
 * fresh defaults (no scaling, no underscan, max 8 bpc; eDP additionally
 * gets the module-default ABM level).
 */
6253 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6255 struct dm_connector_state *state =
6256 to_dm_connector_state(connector->state);
6258 if (connector->state)
6259 __drm_atomic_helper_connector_destroy_state(connector->state);
6263 state = kzalloc(sizeof(*state), GFP_KERNEL);
6266 state->scaling = RMX_OFF;
6267 state->underscan_enable = false;
6268 state->underscan_hborder = 0;
6269 state->underscan_vborder = 0;
6270 state->base.max_requested_bpc = 8;
6271 state->vcpi_slots = 0;
/* ABM (adaptive backlight) only applies to the internal eDP panel. */
6273 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6274 state->abm_level = amdgpu_dm_abm_level;
6276 __drm_atomic_helper_connector_reset(connector, &state->base);
/*
 * drm_connector_funcs.atomic_duplicate_state: kmemdup the DM connector
 * state, then re-run the DRM helper on the base and re-copy the
 * DM-private fields explicitly.
 */
6280 struct drm_connector_state *
6281 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6283 struct dm_connector_state *state =
6284 to_dm_connector_state(connector->state);
6286 struct dm_connector_state *new_state =
6287 kmemdup(state, sizeof(*state), GFP_KERNEL);
6292 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6294 new_state->freesync_capable = state->freesync_capable;
6295 new_state->abm_level = state->abm_level;
6296 new_state->scaling = state->scaling;
6297 new_state->underscan_enable = state->underscan_enable;
6298 new_state->underscan_hborder = state->underscan_hborder;
6299 new_state->underscan_vborder = state->underscan_vborder;
6300 new_state->vcpi_slots = state->vcpi_slots;
6301 new_state->pbn = state->pbn;
6302 return &new_state->base;
/*
 * drm_connector_funcs.late_register: register the DP AUX channel for
 * DP/eDP connectors and create per-connector debugfs entries.
 */
6306 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6308 struct amdgpu_dm_connector *amdgpu_dm_connector =
6309 to_amdgpu_dm_connector(connector);
6312 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6313 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6314 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6315 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6320 #if defined(CONFIG_DEBUG_FS)
6321 connector_debugfs_init(amdgpu_dm_connector);
/* Connector function table wiring the DM implementations into DRM. */
6327 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6328 .reset = amdgpu_dm_connector_funcs_reset,
6329 .detect = amdgpu_dm_connector_detect,
6330 .fill_modes = drm_helper_probe_single_connector_modes,
6331 .destroy = amdgpu_dm_connector_destroy,
6332 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6333 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6334 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6335 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6336 .late_register = amdgpu_dm_connector_late_register,
6337 .early_unregister = amdgpu_dm_connector_unregister
/* drm_connector_helper_funcs.get_modes: delegate to the DM implementation. */
6340 static int get_modes(struct drm_connector *connector)
6342 return amdgpu_dm_connector_get_modes(connector);
/*
 * Create an emulated (virtual) dc_sink from the EDID blob forced onto the
 * connector. If no EDID firmware is present the connector is forced OFF.
 * With DRM_FORCE_ON the emulated (or local) sink also becomes the active
 * dc_sink.
 */
6345 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6347 struct dc_sink_init_data init_params = {
6348 .link = aconnector->dc_link,
6349 .sink_signal = SIGNAL_TYPE_VIRTUAL
6353 if (!aconnector->base.edid_blob_ptr) {
6354 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6355 aconnector->base.name);
6357 aconnector->base.force = DRM_FORCE_OFF;
6358 aconnector->base.override_edid = false;
6362 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6364 aconnector->edid = edid;
6366 aconnector->dc_em_sink = dc_link_add_remote_sink(
6367 aconnector->dc_link,
/* EDID size = base block plus extension blocks. */
6369 (edid->extensions + 1) * EDID_LENGTH,
6372 if (aconnector->base.force == DRM_FORCE_ON) {
6373 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6374 aconnector->dc_link->local_sink :
6375 aconnector->dc_em_sink;
6376 dc_sink_retain(aconnector->dc_sink);
/*
 * Handle a forced-EDID connector: seed non-zero DP link caps so a headless
 * forced-on DP connector can do an initial modeset, then build the
 * emulated sink from the override EDID.
 */
6380 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6382 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6385 * In case of headless boot with force on for DP managed connector
6386 * Those settings have to be != 0 to get initial modeset
6388 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6389 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6390 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6394 aconnector->base.override_edid = true;
6395 create_eml_sink(aconnector);
/*
 * Create a dc_stream for @aconnector and validate it with DC, retrying
 * with progressively lower bpc (down to 6) while validation fails. If
 * validation still fails with DC_FAIL_ENC_VALIDATE, retry once more
 * forcing YCbCr420 output (recursive call with force_yuv420_output set).
 * Returns the validated stream, or NULL.
 */
6398 static struct dc_stream_state *
6399 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6400 const struct drm_display_mode *drm_mode,
6401 const struct dm_connector_state *dm_state,
6402 const struct dc_stream_state *old_stream)
6404 struct drm_connector *connector = &aconnector->base;
6405 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6406 struct dc_stream_state *stream;
6407 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6408 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6409 enum dc_status dc_result = DC_OK;
6412 stream = create_stream_for_sink(aconnector, drm_mode,
6413 dm_state, old_stream,
6415 if (stream == NULL) {
6416 DRM_ERROR("Failed to create stream for sink!\n");
6420 dc_result = dc_validate_stream(adev->dm.dc, stream);
6422 if (dc_result != DC_OK) {
6423 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6428 dc_status_to_str(dc_result));
6430 dc_stream_release(stream);
6432 requested_bpc -= 2; /* lower bpc to retry validation */
6435 } while (stream == NULL && requested_bpc >= 6);
/* Last resort: some encoders only validate at YCbCr420. */
6437 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6438 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6440 aconnector->force_yuv420_output = true;
6441 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6442 dm_state, old_stream);
6443 aconnector->force_yuv420_output = false;
/*
 * drm_connector_helper_funcs.mode_valid: reject interlaced/doublescan
 * modes, lazily build the emulated sink for forced connectors, then accept
 * the mode only if a validated dc_stream can be created for it.
 */
6449 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6450 struct drm_display_mode *mode)
6452 int result = MODE_ERROR;
6453 struct dc_sink *dc_sink;
6454 /* TODO: Unhardcode stream count */
6455 struct dc_stream_state *stream;
6456 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
/* Interlaced and doublescan modes are not supported. */
6458 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6459 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6463 * Only run this the first time mode_valid is called to initialize
6466 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6467 !aconnector->dc_em_sink)
6468 handle_edid_mgmt(aconnector);
6470 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6472 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6473 aconnector->base.force != DRM_FORCE_ON) {
6474 DRM_ERROR("dc_sink is NULL!\n");
6478 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6480 dc_stream_release(stream);
6485 /* TODO: error handling*/
/*
 * Pack the connector state's HDR output metadata into a DC info packet.
 * Builds the HDMI DRM (static metadata) infoframe, then wraps the 26-byte
 * payload in either an HDMI infoframe header or a DP SDP header depending
 * on connector type. Leaves *out zeroed when no metadata is attached.
 */
6489 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6490 struct dc_info_packet *out)
6492 struct hdmi_drm_infoframe frame;
6493 unsigned char buf[30]; /* 26 + 4 */
6497 memset(out, 0, sizeof(*out));
6499 if (!state->hdr_output_metadata)
6502 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6506 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6510 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6514 /* Prepare the infopacket for DC. */
6515 switch (state->connector->connector_type) {
6516 case DRM_MODE_CONNECTOR_HDMIA:
6517 out->hb0 = 0x87; /* type */
6518 out->hb1 = 0x01; /* version */
6519 out->hb2 = 0x1A; /* length */
6520 out->sb[0] = buf[3]; /* checksum */
6524 case DRM_MODE_CONNECTOR_DisplayPort:
6525 case DRM_MODE_CONNECTOR_eDP:
6526 out->hb0 = 0x00; /* sdp id, zero */
6527 out->hb1 = 0x87; /* type */
6528 out->hb2 = 0x1D; /* payload len - 1 */
6529 out->hb3 = (0x13 << 2); /* sdp version */
6530 out->sb[0] = 0x01; /* version */
6531 out->sb[1] = 0x1A; /* length */
/* Copy the 26-byte static-metadata payload, skipping the 4-byte header. */
6539 memcpy(&out->sb[i], &buf[4], 26);
6542 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6543 sizeof(out->sb), false);
/*
 * drm_connector_helper_funcs.atomic_check: when the HDR metadata property
 * changed, validate that it packs into an infopacket and force a modeset
 * on the attached CRTC if we are entering or exiting HDR (metadata
 * appearing or disappearing). Metadata-only updates do not force one.
 */
6549 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6550 struct drm_atomic_state *state)
6552 struct drm_connector_state *new_con_state =
6553 drm_atomic_get_new_connector_state(state, conn);
6554 struct drm_connector_state *old_con_state =
6555 drm_atomic_get_old_connector_state(state, conn);
6556 struct drm_crtc *crtc = new_con_state->crtc;
6557 struct drm_crtc_state *new_crtc_state;
6560 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6565 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6566 struct dc_info_packet hdr_infopacket;
/* Validate the metadata early; result is discarded here. */
6568 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6572 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6573 if (IS_ERR(new_crtc_state))
6574 return PTR_ERR(new_crtc_state);
6577 * DC considers the stream backends changed if the
6578 * static metadata changes. Forcing the modeset also
6579 * gives a simple way for userspace to switch from
6580 * 8bpc to 10bpc when setting the metadata to enter
6583 * Changing the static metadata after it's been
6584 * set is permissible, however. So only force a
6585 * modeset if we're entering or exiting HDR.
6587 new_crtc_state->mode_changed =
6588 !old_con_state->hdr_output_metadata ||
6589 !new_con_state->hdr_output_metadata;
6595 static const struct drm_connector_helper_funcs
6596 amdgpu_dm_connector_helper_funcs = {
6598 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6599 * modes will be filtered by drm_mode_validate_size(), and those modes
6600 * are missing after user start lightdm. So we need to renew modes list.
6601 * in get_modes call back, not just return the modes count
6603 .get_modes = get_modes,
6604 .mode_valid = amdgpu_dm_connector_mode_valid,
6605 .atomic_check = amdgpu_dm_connector_atomic_check,
/* drm_crtc_helper_funcs.disable: intentionally a no-op stub. */
6608 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
/*
 * Count the non-cursor planes on @new_crtc_state that will be enabled
 * (i.e. have a framebuffer). Planes without new state in this commit are
 * counted as enabled, since they previously passed validation.
 */
6612 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6614 struct drm_atomic_state *state = new_crtc_state->state;
6615 struct drm_plane *plane;
6618 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6619 struct drm_plane_state *new_plane_state;
6621 /* Cursor planes are "fake". */
6622 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6625 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6627 if (!new_plane_state) {
6629 * The plane is enable on the CRTC and hasn't changed
6630 * state. This means that it previously passed
6631 * validation and is therefore enabled.
6637 /* We need a framebuffer to be considered enabled. */
6638 num_active += (new_plane_state->fb != NULL);
/*
 * Refresh dm_crtc_state->active_planes for @new_crtc_state. A CRTC with
 * no stream has zero active planes by definition.
 */
6644 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6645 struct drm_crtc_state *new_crtc_state)
6647 struct dm_crtc_state *dm_new_crtc_state =
6648 to_dm_crtc_state(new_crtc_state);
6650 dm_new_crtc_state->active_planes = 0;
6652 if (!dm_new_crtc_state->stream)
6655 dm_new_crtc_state->active_planes =
6656 count_crtc_active_planes(new_crtc_state);
/*
 * drm_crtc_helper_funcs.atomic_check: recount active planes, require the
 * primary plane whenever the CRTC is enabled, and run DC stream
 * validation on the attached stream (no stream, e.g. during reset, is
 * allowed).
 */
6659 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6660 struct drm_atomic_state *state)
6662 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6664 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6665 struct dc *dc = adev->dm.dc;
6666 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6669 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6671 dm_update_crtc_active_planes(crtc, crtc_state);
/* A modeset that needs a stream but has none is a driver bug. */
6673 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6674 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6679 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6680 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6681 * planes are disabled, which is not supported by the hardware. And there is legacy
6682 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6684 if (crtc_state->enable &&
6685 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6686 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6690 /* In some use cases, like reset, no stream is attached */
6691 if (!dm_crtc_state->stream)
6694 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6697 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
/* drm_crtc_helper_funcs.mode_fixup: no adjustment needed for DM CRTCs. */
6701 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6702 const struct drm_display_mode *mode,
6703 struct drm_display_mode *adjusted_mode)
/* CRTC helper table used by the atomic helpers. */
6708 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6709 .disable = dm_crtc_helper_disable,
6710 .atomic_check = dm_crtc_helper_atomic_check,
6711 .mode_fixup = dm_crtc_helper_mode_fixup,
6712 .get_scanout_position = amdgpu_crtc_get_scanout_position,
/* drm_encoder_helper_funcs.disable: intentionally a no-op stub. */
6715 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
/* Map a DC color-depth enum to its bits-per-component value. */
6720 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6722 switch (display_color_depth) {
6723 case COLOR_DEPTH_666:
6725 case COLOR_DEPTH_888:
6727 case COLOR_DEPTH_101010:
6729 case COLOR_DEPTH_121212:
6731 case COLOR_DEPTH_141414:
6733 case COLOR_DEPTH_161616:
/*
 * drm_encoder_helper_funcs.atomic_check: for MST connectors, compute the
 * stream's PBN from clock and bpp and atomically claim VCPI slots on the
 * topology manager. Skipped for duplicated states and when neither
 * connectors nor mode changed. Non-MST connectors (no port/sink) pass
 * through.
 */
6741 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6742 struct drm_crtc_state *crtc_state,
6743 struct drm_connector_state *conn_state)
6745 struct drm_atomic_state *state = crtc_state->state;
6746 struct drm_connector *connector = conn_state->connector;
6747 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6748 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6749 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6750 struct drm_dp_mst_topology_mgr *mst_mgr;
6751 struct drm_dp_mst_port *mst_port;
6752 enum dc_color_depth color_depth;
6754 bool is_y420 = false;
/* Only MST-attached connectors need VCPI accounting. */
6756 if (!aconnector->port || !aconnector->dc_sink)
6759 mst_port = aconnector->port;
6760 mst_mgr = &aconnector->mst_port->mst_mgr;
6762 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6765 if (!state->duplicated) {
6766 int max_bpc = conn_state->max_requested_bpc;
6767 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6768 aconnector->force_yuv420_output;
6769 color_depth = convert_color_depth_from_display_info(connector,
/* bpp = 3 components * bits per component. */
6772 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6773 clock = adjusted_mode->clock;
6774 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6776 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6779 dm_new_connector_state->pbn,
6780 dm_mst_get_pbn_divider(aconnector->dc_link));
6781 if (dm_new_connector_state->vcpi_slots < 0) {
6782 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6783 return dm_new_connector_state->vcpi_slots;
/* Encoder helper table; only disable and atomic_check are needed. */
6788 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6789 .disable = dm_encoder_helper_disable,
6790 .atomic_check = dm_encoder_helper_atomic_check
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For every MST connector in @state, find its dc_stream in @dc_state and
 * reconcile DSC state: when the stream's timing has DSC enabled,
 * recompute PBN from the DSC bits-per-pixel and claim matching VCPI
 * slots; otherwise atomically disable DSC on the port.
 */
6794 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6795 struct dc_state *dc_state)
6797 struct dc_stream_state *stream = NULL;
6798 struct drm_connector *connector;
6799 struct drm_connector_state *new_con_state;
6800 struct amdgpu_dm_connector *aconnector;
6801 struct dm_connector_state *dm_conn_state;
6802 int i, j, clock, bpp;
6803 int vcpi, pbn_div, pbn = 0;
6805 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6807 aconnector = to_amdgpu_dm_connector(connector);
/* Non-MST connector — nothing to do. */
6809 if (!aconnector->port)
6812 if (!new_con_state || !new_con_state->crtc)
6815 dm_conn_state = to_dm_connector_state(new_con_state);
/* Locate the dc_stream that belongs to this connector. */
6817 for (j = 0; j < dc_state->stream_count; j++) {
6818 stream = dc_state->streams[j];
6822 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6831 if (stream->timing.flags.DSC != 1) {
6832 drm_dp_mst_atomic_enable_dsc(state,
/* DSC enabled: recompute PBN from the compressed bpp. */
6840 pbn_div = dm_mst_get_pbn_divider(stream->link);
6841 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6842 clock = stream->timing.pix_clk_100hz / 10;
6843 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6844 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6851 dm_conn_state->pbn = pbn;
6852 dm_conn_state->vcpi_slots = vcpi;
/*
 * drm_plane_funcs.reset: destroy any existing plane state and install a
 * fresh zeroed dm_plane_state.
 */
6858 static void dm_drm_plane_reset(struct drm_plane *plane)
6860 struct dm_plane_state *amdgpu_state = NULL;
6863 plane->funcs->atomic_destroy_state(plane, plane->state);
6865 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6866 WARN_ON(amdgpu_state == NULL);
6869 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
/*
 * drm_plane_funcs.atomic_duplicate_state: copy the DM plane state, taking
 * an extra reference on the dc_plane_state so both copies own it.
 */
6872 static struct drm_plane_state *
6873 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6875 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6877 old_dm_plane_state = to_dm_plane_state(plane->state);
6878 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6879 if (!dm_plane_state)
6882 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6884 if (old_dm_plane_state->dc_state) {
6885 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6886 dc_plane_state_retain(dm_plane_state->dc_state);
6889 return &dm_plane_state->base;
/*
 * drm_plane_funcs.atomic_destroy_state: drop the dc_plane_state reference
 * then free the base plane state via the DRM helper.
 */
6892 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6893 struct drm_plane_state *state)
6895 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6897 if (dm_plane_state->dc_state)
6898 dc_plane_state_release(dm_plane_state->dc_state);
6900 drm_atomic_helper_plane_destroy_state(plane, state);
/* Plane function table wiring the DM implementations into DRM. */
6903 static const struct drm_plane_funcs dm_plane_funcs = {
6904 .update_plane = drm_atomic_helper_update_plane,
6905 .disable_plane = drm_atomic_helper_disable_plane,
6906 .destroy = drm_primary_helper_destroy,
6907 .reset = dm_drm_plane_reset,
6908 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6909 .atomic_destroy_state = dm_drm_plane_destroy_state,
6910 .format_mod_supported = dm_plane_format_mod_supported,
/*
 * drm_plane_helper_funcs.prepare_fb: pin the framebuffer BO so it is
 * resident and GPU-addressable before the commit touches hardware.
 * Sequence: reserve via TTM execbuf utils -> pin in a suitable domain
 * -> bind to GART -> record the GPU address in the amdgpu_framebuffer.
 * For newly created planes it also pre-fills DC buffer attributes,
 * since those planes are not yet referenced by DC (see comment below).
 * (NOTE: view is sampled — error-return lines are elided here.)
 */
6913 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6914 struct drm_plane_state *new_state)
6916 struct amdgpu_framebuffer *afb;
6917 struct drm_gem_object *obj;
6918 struct amdgpu_device *adev;
6919 struct amdgpu_bo *rbo;
6920 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6921 struct list_head list;
6922 struct ttm_validate_buffer tv;
6923 struct ww_acquire_ctx ticket;
/* Nothing to pin when the plane is being disabled (no FB). */
6927 if (!new_state->fb) {
6928 DRM_DEBUG_KMS("No FB bound\n");
6932 afb = to_amdgpu_framebuffer(new_state->fb);
6933 obj = new_state->fb->obj[0];
6934 rbo = gem_to_amdgpu_bo(obj);
6935 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6936 INIT_LIST_HEAD(&list);
6940 list_add(&tv.head, &list);
/* Reserve the BO (ww-mutex acquire) before pinning it. */
6942 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6944 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
/* Cursor BOs must live in VRAM; other planes may use GTT too. */
6948 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6949 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6951 domain = AMDGPU_GEM_DOMAIN_VRAM;
6953 r = amdgpu_bo_pin(rbo, domain);
6954 if (unlikely(r != 0)) {
/* -ERESTARTSYS is a normal signal-interruption, not worth logging. */
6955 if (r != -ERESTARTSYS)
6956 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6957 ttm_eu_backoff_reservation(&ticket, &list);
/* Ensure a GART binding exists so the display engine can scan out. */
6961 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6962 if (unlikely(r != 0)) {
/* Unwind the pin on GART failure before backing off the reservation. */
6963 amdgpu_bo_unpin(rbo);
6964 ttm_eu_backoff_reservation(&ticket, &list);
6965 DRM_ERROR("%p bind failed\n", rbo);
/* Success path: release the reservation; the BO stays pinned. */
6969 ttm_eu_backoff_reservation(&ticket, &list);
6971 afb->address = amdgpu_bo_gpu_offset(rbo);
6976 * We don't do surface updates on planes that have been newly created,
6977 * but we also don't have the afb->address during atomic check.
6979 * Fill in buffer attributes depending on the address here, but only on
6980 * newly created planes since they're not being used by DC yet and this
6981 * won't modify global state.
6983 dm_plane_state_old = to_dm_plane_state(plane->state);
6984 dm_plane_state_new = to_dm_plane_state(new_state);
/* A differing dc_state pointer marks a newly created plane state. */
6986 if (dm_plane_state_new->dc_state &&
6987 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6988 struct dc_plane_state *plane_state =
6989 dm_plane_state_new->dc_state;
6990 bool force_disable_dcc = !plane_state->dcc.enable;
6992 fill_plane_buffer_attributes(
6993 adev, afb, plane_state->format, plane_state->rotation,
6995 &plane_state->tiling_info, &plane_state->plane_size,
6996 &plane_state->dcc, &plane_state->address,
6997 afb->tmz_surface, force_disable_dcc);
/*
 * drm_plane_helper_funcs.cleanup_fb: counterpart of prepare_fb.
 * Reserve, unpin, unreserve and drop the reference on the old FB's BO.
 */
7003 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7004 struct drm_plane_state *old_state)
7006 struct amdgpu_bo *rbo;
7012 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7013 r = amdgpu_bo_reserve(rbo, false);
/* Without the reservation we cannot safely unpin; bail with a log. */
7015 DRM_ERROR("failed to reserve rbo before unpin\n");
7019 amdgpu_bo_unpin(rbo);
7020 amdgpu_bo_unreserve(rbo);
7021 amdgpu_bo_unref(&rbo);
/*
 * Validate a plane state against its CRTC: clamp the visible viewport
 * to the mode, enforce DC minimum viewport sizes, then translate DC's
 * scaling caps into DRM 16.16 fixed-point min/max scale factors and
 * delegate to drm_atomic_helper_check_plane_state().
 * Returns 0 on success or a negative errno (elided lines carry the
 * early error returns in the original).
 */
7024 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7025 struct drm_crtc_state *new_crtc_state)
7027 struct drm_framebuffer *fb = state->fb;
7028 int min_downscale, max_upscale;
7030 int max_scale = INT_MAX;
7032 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7033 if (fb && state->crtc) {
7034 /* Validate viewport to cover the case when only the position changes */
7035 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7036 int viewport_width = state->crtc_w;
7037 int viewport_height = state->crtc_h;
/* Clip the viewport against each screen edge in turn. */
7039 if (state->crtc_x < 0)
7040 viewport_width += state->crtc_x;
7041 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7042 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7044 if (state->crtc_y < 0)
7045 viewport_height += state->crtc_y;
7046 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7047 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
/* Negative size means the plane is fully off-screen — reject. */
7049 if (viewport_width < 0 || viewport_height < 0) {
7050 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7052 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7053 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7055 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7056 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7062 /* Get min/max allowed scaling factors from plane caps. */
7063 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7064 &min_downscale, &max_upscale);
7066 * Convert to drm convention: 16.16 fixed point, instead of dc's
7067 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7068 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7070 min_scale = (1000 << 16) / max_upscale;
7071 max_scale = (1000 << 16) / min_downscale;
/* can_position=true, can_update_disabled=true for DM planes. */
7074 return drm_atomic_helper_check_plane_state(
7075 state, new_crtc_state, min_scale, max_scale, true, true);
/*
 * drm_plane_helper_funcs.atomic_check: validate the new plane state.
 * Checks viewport/scaling via dm_plane_helper_check_state(), computes
 * DC scaling info, then asks DC itself to validate the plane state.
 * Returns 0 when DC reports DC_OK (elided lines hold the returns).
 */
7078 static int dm_plane_atomic_check(struct drm_plane *plane,
7079 struct drm_atomic_state *state)
7081 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7083 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7084 struct dc *dc = adev->dm.dc;
7085 struct dm_plane_state *dm_plane_state;
7086 struct dc_scaling_info scaling_info;
7087 struct drm_crtc_state *new_crtc_state;
7090 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7092 dm_plane_state = to_dm_plane_state(new_plane_state);
/* No DC state attached (e.g. plane never enabled) — nothing to check. */
7094 if (!dm_plane_state->dc_state)
7098 drm_atomic_get_new_crtc_state(state,
7099 new_plane_state->crtc);
7100 if (!new_crtc_state)
7103 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7107 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
/* Final word belongs to DC core validation. */
7111 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
/*
 * drm_plane_helper_funcs.atomic_async_check: gate async (out-of-commit)
 * updates — only cursor planes may be updated asynchronously.
 */
7117 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7118 struct drm_atomic_state *state)
7120 /* Only support async updates on cursor planes. */
7121 if (plane->type != DRM_PLANE_TYPE_CURSOR)
/*
 * drm_plane_helper_funcs.atomic_async_update: apply a cursor move
 * without a full atomic commit. Copies the new FB and geometry into
 * the live plane state, then programs the cursor in hardware.
 */
7127 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7128 struct drm_atomic_state *state)
7130 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7132 struct drm_plane_state *old_state =
7133 drm_atomic_get_old_plane_state(state, plane);
7135 trace_amdgpu_dm_atomic_update_cursor(new_state);
/* Swap so the old FB is released through new_state's teardown path. */
7137 swap(plane->state->fb, new_state->fb);
7139 plane->state->src_x = new_state->src_x;
7140 plane->state->src_y = new_state->src_y;
7141 plane->state->src_w = new_state->src_w;
7142 plane->state->src_h = new_state->src_h;
7143 plane->state->crtc_x = new_state->crtc_x;
7144 plane->state->crtc_y = new_state->crtc_y;
7145 plane->state->crtc_w = new_state->crtc_w;
7146 plane->state->crtc_h = new_state->crtc_h;
/* Push the updated position/attributes to DC. */
7148 handle_cursor_update(plane, old_state);
/* Plane helper table: FB pin/unpin hooks, atomic validation, and the
 * async (cursor-only) fast-path hooks. */
7151 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7152 .prepare_fb = dm_plane_helper_prepare_fb,
7153 .cleanup_fb = dm_plane_helper_cleanup_fb,
7154 .atomic_check = dm_plane_atomic_check,
7155 .atomic_async_check = dm_plane_atomic_async_check,
7156 .atomic_async_update = dm_plane_atomic_async_update
7160 * TODO: these are currently initialized to rgb formats only.
7161 * For future use cases we should either initialize them dynamically based on
7162 * plane capabilities, or initialize this array to all formats, so internal drm
7163 * check will succeed, and let DC implement proper check
/* Pixel formats advertised for primary planes (8/10/16-bit RGB). */
7165 static const uint32_t rgb_formats[] = {
7166 DRM_FORMAT_XRGB8888,
7167 DRM_FORMAT_ARGB8888,
7168 DRM_FORMAT_RGBA8888,
7169 DRM_FORMAT_XRGB2101010,
7170 DRM_FORMAT_XBGR2101010,
7171 DRM_FORMAT_ARGB2101010,
7172 DRM_FORMAT_ABGR2101010,
7173 DRM_FORMAT_XRGB16161616,
7174 DRM_FORMAT_XBGR16161616,
7175 DRM_FORMAT_ARGB16161616,
7176 DRM_FORMAT_ABGR16161616,
7177 DRM_FORMAT_XBGR8888,
7178 DRM_FORMAT_ABGR8888,
/* Pixel formats advertised for overlay planes (8-bit RGB only). */
7182 static const uint32_t overlay_formats[] = {
7183 DRM_FORMAT_XRGB8888,
7184 DRM_FORMAT_ARGB8888,
7185 DRM_FORMAT_RGBA8888,
7186 DRM_FORMAT_XBGR8888,
7187 DRM_FORMAT_ABGR8888,
/* Pixel formats for the cursor plane (entries elided in this view —
 * presumably ARGB8888; verify against the full source). */
7191 static const u32 cursor_formats[] = {
/*
 * Populate @formats with the pixel formats supported by @plane, capped
 * at @max_formats entries. Primary planes get the RGB table plus any
 * YUV/FP16 formats the DC plane caps report; overlay and cursor planes
 * use their fixed tables. Returns the number of formats written.
 */
7195 static int get_plane_formats(const struct drm_plane *plane,
7196 const struct dc_plane_cap *plane_cap,
7197 uint32_t *formats, int max_formats)
7199 int i, num_formats = 0;
7202 * TODO: Query support for each group of formats directly from
7203 * DC plane caps. This will require adding more formats to the
7207 switch (plane->type) {
7208 case DRM_PLANE_TYPE_PRIMARY:
7209 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
/* Stop once the caller's array is full. */
7210 if (num_formats >= max_formats)
7213 formats[num_formats++] = rgb_formats[i];
/* Optional formats gated on DC plane capability bits. */
7216 if (plane_cap && plane_cap->pixel_format_support.nv12)
7217 formats[num_formats++] = DRM_FORMAT_NV12;
7218 if (plane_cap && plane_cap->pixel_format_support.p010)
7219 formats[num_formats++] = DRM_FORMAT_P010;
7220 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7221 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7222 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7223 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7224 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7228 case DRM_PLANE_TYPE_OVERLAY:
7229 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7230 if (num_formats >= max_formats)
7233 formats[num_formats++] = overlay_formats[i];
7237 case DRM_PLANE_TYPE_CURSOR:
7238 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7239 if (num_formats >= max_formats)
7242 formats[num_formats++] = cursor_formats[i];
/*
 * Register one DRM plane with the core: gather supported formats and
 * modifiers, call drm_universal_plane_init(), then attach the optional
 * properties (alpha/blend for overlays, YCbCr color encoding/range for
 * YUV-capable primaries, rotation on non-cursor planes), hook up the
 * helper funcs and reset the plane state. Returns 0 or negative errno
 * (error-return lines elided in this view).
 */
7250 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7251 struct drm_plane *plane,
7252 unsigned long possible_crtcs,
7253 const struct dc_plane_cap *plane_cap)
7255 uint32_t formats[32];
7258 unsigned int supported_rotations;
7259 uint64_t *modifiers = NULL;
7261 num_formats = get_plane_formats(plane, plane_cap, formats,
7262 ARRAY_SIZE(formats));
/* Modifier list is heap-allocated; original frees it after init. */
7264 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7268 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7269 &dm_plane_funcs, formats, num_formats,
7270 modifiers, plane->type, NULL);
/* Per-pixel alpha blending only where DC caps say so (overlays). */
7275 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7276 plane_cap && plane_cap->per_pixel_alpha) {
7277 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7278 BIT(DRM_MODE_BLEND_PREMULTI);
7280 drm_plane_create_alpha_property(plane);
7281 drm_plane_create_blend_mode_property(plane, blend_caps);
7284 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7286 (plane_cap->pixel_format_support.nv12 ||
7287 plane_cap->pixel_format_support.p010)) {
7288 /* This only affects YUV formats. */
7289 drm_plane_create_color_properties(
7291 BIT(DRM_COLOR_YCBCR_BT601) |
7292 BIT(DRM_COLOR_YCBCR_BT709) |
7293 BIT(DRM_COLOR_YCBCR_BT2020),
7294 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7295 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7296 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7299 supported_rotations =
7300 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7301 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
/* Pre-Bonaire ASICs and cursors do not get a rotation property. */
7303 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7304 plane->type != DRM_PLANE_TYPE_CURSOR)
7305 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7306 supported_rotations);
7308 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7310 /* Create (reset) the plane state */
7311 if (plane->funcs->reset)
7312 plane->funcs->reset(plane);
/*
 * Create and register one CRTC: allocates a dedicated cursor plane,
 * initializes the amdgpu_crtc with primary+cursor planes, installs
 * helper funcs, resets state, seeds cursor limits from DC caps and
 * enables color management. On failure, falls through to free the
 * cursor plane (cleanup labels elided in this view).
 */
7317 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7318 struct drm_plane *plane,
7319 uint32_t crtc_index)
7321 struct amdgpu_crtc *acrtc = NULL;
7322 struct drm_plane *cursor_plane;
7326 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7330 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
/* possible_crtcs=0: DRM fills this in for cursor planes. */
7331 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7333 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7337 res = drm_crtc_init_with_planes(
7342 &amdgpu_dm_crtc_funcs, NULL);
7347 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7349 /* Create (reset) the plane state */
7350 if (acrtc->base.funcs->reset)
7351 acrtc->base.funcs->reset(&acrtc->base);
/* Hardware cursor size limits come straight from DC caps. */
7353 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7354 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7356 acrtc->crtc_id = crtc_index;
7357 acrtc->base.enabled = false;
/* -1 marks "no OTG assigned yet". */
7358 acrtc->otg_inst = -1;
7360 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7361 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7362 true, MAX_COLOR_LUT_ENTRIES);
7363 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7369 kfree(cursor_plane);
/*
 * Map a DC signal_type to the corresponding DRM connector type.
 * Unknown signals fall through to DRM_MODE_CONNECTOR_Unknown.
 */
7374 static int to_drm_connector_type(enum signal_type st)
7377 case SIGNAL_TYPE_HDMI_TYPE_A:
7378 return DRM_MODE_CONNECTOR_HDMIA;
7379 case SIGNAL_TYPE_EDP:
7380 return DRM_MODE_CONNECTOR_eDP;
7381 case SIGNAL_TYPE_LVDS:
7382 return DRM_MODE_CONNECTOR_LVDS;
7383 case SIGNAL_TYPE_RGB:
7384 return DRM_MODE_CONNECTOR_VGA;
/* SST and MST DP both present as a DisplayPort connector. */
7385 case SIGNAL_TYPE_DISPLAY_PORT:
7386 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7387 return DRM_MODE_CONNECTOR_DisplayPort;
7388 case SIGNAL_TYPE_DVI_DUAL_LINK:
7389 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7390 return DRM_MODE_CONNECTOR_DVID;
7391 case SIGNAL_TYPE_VIRTUAL:
7392 return DRM_MODE_CONNECTOR_VIRTUAL;
7395 return DRM_MODE_CONNECTOR_Unknown;
/*
 * Return the (single) encoder attached to @connector; the loop body
 * returns the first — and only — possible encoder.
 */
7399 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7401 struct drm_encoder *encoder;
7403 /* There is only one encoder per connector */
7404 drm_connector_for_each_possible_encoder(connector, encoder)
/*
 * Record the connector's preferred probed mode as the encoder's native
 * mode. Clears native_mode.clock first so "no preferred mode found"
 * is detectable by callers.
 */
7410 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7412 struct drm_encoder *encoder;
7413 struct amdgpu_encoder *amdgpu_encoder;
7415 encoder = amdgpu_dm_connector_to_encoder(connector);
7417 if (encoder == NULL)
7420 amdgpu_encoder = to_amdgpu_encoder(encoder);
/* clock == 0 means "no native mode". */
7422 amdgpu_encoder->native_mode.clock = 0;
7424 if (!list_empty(&connector->probed_modes)) {
7425 struct drm_display_mode *preferred_mode = NULL;
/* probed_modes is sorted by the caller, so the last PREFERRED hit
 * (see amdgpu_dm_connector_ddc_get_modes) wins. */
7427 list_for_each_entry(preferred_mode,
7428 &connector->probed_modes,
7430 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7431 amdgpu_encoder->native_mode = *preferred_mode;
/*
 * Duplicate the encoder's native mode with a new hdisplay/vdisplay and
 * name, clearing the PREFERRED flag. Used to synthesize common scaled
 * modes for panels. Returns the new mode (NULL-check lines elided).
 */
7439 static struct drm_display_mode *
7440 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7442 int hdisplay, int vdisplay)
7444 struct drm_device *dev = encoder->dev;
7445 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7446 struct drm_display_mode *mode = NULL;
7447 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7449 mode = drm_mode_duplicate(dev, native_mode);
7454 mode->hdisplay = hdisplay;
7455 mode->vdisplay = vdisplay;
/* Only the true native mode should be marked preferred. */
7456 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7457 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
/*
 * Add a set of common resolutions (scaled from the native mode) to the
 * connector's probed-mode list. Modes larger than the native mode,
 * equal to it, or already present are skipped.
 */
7463 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7464 struct drm_connector *connector)
7466 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7467 struct drm_display_mode *mode = NULL;
7468 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7469 struct amdgpu_dm_connector *amdgpu_dm_connector =
7470 to_amdgpu_dm_connector(connector);
/* Table of candidate common modes: name plus width/height. */
7474 char name[DRM_DISPLAY_MODE_LEN];
7477 } common_modes[] = {
7478 { "640x480", 640, 480},
7479 { "800x600", 800, 600},
7480 { "1024x768", 1024, 768},
7481 { "1280x720", 1280, 720},
7482 { "1280x800", 1280, 800},
7483 {"1280x1024", 1280, 1024},
7484 { "1440x900", 1440, 900},
7485 {"1680x1050", 1680, 1050},
7486 {"1600x1200", 1600, 1200},
7487 {"1920x1080", 1920, 1080},
7488 {"1920x1200", 1920, 1200}
7491 n = ARRAY_SIZE(common_modes);
7493 for (i = 0; i < n; i++) {
7494 struct drm_display_mode *curmode = NULL;
7495 bool mode_existed = false;
/* Skip modes that exceed or exactly match the native resolution. */
7497 if (common_modes[i].w > native_mode->hdisplay ||
7498 common_modes[i].h > native_mode->vdisplay ||
7499 (common_modes[i].w == native_mode->hdisplay &&
7500 common_modes[i].h == native_mode->vdisplay))
/* Skip modes already present in the probed list (by resolution). */
7503 list_for_each_entry(curmode, &connector->probed_modes, head) {
7504 if (common_modes[i].w == curmode->hdisplay &&
7505 common_modes[i].h == curmode->vdisplay) {
7506 mode_existed = true;
7514 mode = amdgpu_dm_create_common_mode(encoder,
7515 common_modes[i].name, common_modes[i].w,
7517 drm_mode_probed_add(connector, mode);
7518 amdgpu_dm_connector->num_modes++;
/*
 * Populate probed modes from an EDID: reset the list, add EDID modes,
 * sort them (multiple preferred timings possible), derive the native
 * mode, and refresh FreeSync caps. With no EDID, num_modes is zeroed.
 */
7522 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7525 struct amdgpu_dm_connector *amdgpu_dm_connector =
7526 to_amdgpu_dm_connector(connector);
7529 /* empty probed_modes */
7530 INIT_LIST_HEAD(&connector->probed_modes);
7531 amdgpu_dm_connector->num_modes =
7532 drm_add_edid_modes(connector, edid);
7534 /* sorting the probed modes before calling function
7535 * amdgpu_dm_get_native_mode() since EDID can have
7536 * more than one preferred mode. The modes that are
7537 * later in the probed mode list could be of higher
7538 * and preferred resolution. For example, 3840x2160
7539 * resolution in base EDID preferred timing and 4096x2160
7540 * preferred resolution in DID extension block later.
7542 drm_mode_sort(&connector->probed_modes);
7543 amdgpu_dm_get_native_mode(connector);
7545 /* Freesync capabilities are reset by calling
7546 * drm_add_edid_modes() and need to be
7549 amdgpu_dm_update_freesync_caps(connector, edid);
/* No EDID: report zero modes (else-branch line elided above). */
7551 amdgpu_dm_connector->num_modes = 0;
/* Return true when @mode already exists in the connector's probed
 * mode list (full drm_mode_equal() comparison). */
7555 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7556 struct drm_display_mode *mode)
7558 struct drm_display_mode *m;
7560 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7561 if (drm_mode_equal(m, mode))
/*
 * Synthesize FreeSync "fixed refresh" modes: starting from the highest
 * refresh mode at the preferred resolution, create one variant per
 * standard FPS value inside the panel's VRR range by stretching vtotal
 * (pixel clock unchanged). Returns the number of modes added.
 */
7568 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7570 const struct drm_display_mode *m;
7571 struct drm_display_mode *new_mode;
7573 uint32_t new_modes_count = 0;
7575 /* Standard FPS values
7584 * 60 - Commonly used
7585 * 48,72,96 - Multiples of 24
7587 static const uint32_t common_rates[] = {
7588 23976, 24000, 25000, 29970, 30000,
7589 48000, 50000, 60000, 72000, 96000
7593 * Find mode with highest refresh rate with the same resolution
7594 * as the preferred mode. Some monitors report a preferred mode
7595 * with lower resolution than the highest refresh rate supported.
7598 m = get_highest_refresh_rate_mode(aconnector, true);
7602 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7603 uint64_t target_vtotal, target_vtotal_diff;
/* Only generate rates at or below the base mode's refresh rate
 * (vtotal can only be stretched, not shrunk). */
7606 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
/* Rate must fall inside the panel's reported VRR window. */
7609 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7610 common_rates[i] > aconnector->max_vfreq * 1000)
/* vtotal = clock / (rate * htotal), computed in 64-bit to avoid
 * overflow of clock * 1e6. */
7613 num = (unsigned long long)m->clock * 1000 * 1000;
7614 den = common_rates[i] * (unsigned long long)m->htotal;
7615 target_vtotal = div_u64(num, den);
7616 target_vtotal_diff = target_vtotal - m->vtotal;
7618 /* Check for illegal modes */
7619 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7620 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7621 m->vtotal + target_vtotal_diff < m->vsync_end)
7624 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
/* Shift the vertical sync window by the vtotal delta. */
7628 new_mode->vtotal += (u16)target_vtotal_diff;
7629 new_mode->vsync_start += (u16)target_vtotal_diff;
7630 new_mode->vsync_end += (u16)target_vtotal_diff;
7631 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7632 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7634 if (!is_duplicate_mode(aconnector, new_mode)) {
7635 drm_mode_probed_add(&aconnector->base, new_mode);
7636 new_modes_count += 1;
/* Duplicate: free the mode instead of adding it. */
7638 drm_mode_destroy(aconnector->base.dev, new_mode);
7641 return new_modes_count;
/*
 * Add synthesized FreeSync modes when the feature is enabled (module
 * param), an EDID exists, and the VRR range is wide enough (>10 Hz).
 */
7644 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7647 struct amdgpu_dm_connector *amdgpu_dm_connector =
7648 to_amdgpu_dm_connector(connector);
7650 if (!(amdgpu_freesync_vid_mode && edid))
/* Require a usable VRR window before generating extra modes. */
7653 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7654 amdgpu_dm_connector->num_modes +=
7655 add_fs_modes(amdgpu_dm_connector);
/*
 * drm_connector_helper_funcs.get_modes: build the mode list from the
 * cached EDID (plus common and FreeSync modes), or fall back to the
 * no-EDID 640x480 set when the EDID is absent/invalid.
 * Returns the number of modes reported.
 */
7658 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7660 struct amdgpu_dm_connector *amdgpu_dm_connector =
7661 to_amdgpu_dm_connector(connector);
7662 struct drm_encoder *encoder;
7663 struct edid *edid = amdgpu_dm_connector->edid;
7665 encoder = amdgpu_dm_connector_to_encoder(connector);
7667 if (!drm_edid_is_valid(edid)) {
7668 amdgpu_dm_connector->num_modes =
7669 drm_add_modes_noedid(connector, 640, 480);
7671 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7672 amdgpu_dm_connector_add_common_modes(encoder, connector);
7673 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7675 amdgpu_dm_fbc_init(connector);
7677 return amdgpu_dm_connector->num_modes;
/*
 * Common connector initialization shared by SST and MST paths:
 * resets the connector state, wires the aconnector to its dc_link,
 * configures HPD polling and YCbCr 4:2:0 support per connector type,
 * then attaches scaling/underscan/max-bpc/ABM/HDR/VRR/HDCP properties.
 */
7680 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7681 struct amdgpu_dm_connector *aconnector,
7683 struct dc_link *link,
7686 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7689 * Some of the properties below require access to state, like bpc.
7690 * Allocate some default initial connector state with our reset helper.
7692 if (aconnector->base.funcs->reset)
7693 aconnector->base.funcs->reset(&aconnector->base);
7695 aconnector->connector_id = link_index;
7696 aconnector->dc_link = link;
7697 aconnector->base.interlace_allowed = false;
7698 aconnector->base.doublescan_allowed = false;
7699 aconnector->base.stereo_allowed = false;
7700 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7701 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
/* -1 marks "no audio instance assigned". */
7702 aconnector->audio_inst = -1;
7703 mutex_init(&aconnector->hpd_lock);
7706 * configure support HPD hot plug connector_>polled default value is 0
7707 * which means HPD hot plug not supported
7709 switch (connector_type) {
7710 case DRM_MODE_CONNECTOR_HDMIA:
7711 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
/* YCbCr 4:2:0 allowed only when the link encoder supports it. */
7712 aconnector->base.ycbcr_420_allowed =
7713 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7715 case DRM_MODE_CONNECTOR_DisplayPort:
7716 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7717 aconnector->base.ycbcr_420_allowed =
7718 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7720 case DRM_MODE_CONNECTOR_DVID:
7721 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7727 drm_object_attach_property(&aconnector->base.base,
7728 dm->ddev->mode_config.scaling_mode_property,
7729 DRM_MODE_SCALE_NONE);
7731 drm_object_attach_property(&aconnector->base.base,
7732 adev->mode_info.underscan_property,
7734 drm_object_attach_property(&aconnector->base.base,
7735 adev->mode_info.underscan_hborder_property,
7737 drm_object_attach_property(&aconnector->base.base,
7738 adev->mode_info.underscan_vborder_property,
/* MST ports get max-bpc from the MST manager, not here. */
7741 if (!aconnector->mst_port)
7742 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7744 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7745 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7746 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
/* ABM backlight control needs either DMCU firmware or DMUB. */
7748 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7749 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7750 drm_object_attach_property(&aconnector->base.base,
7751 adev->mode_info.abm_level_property, 0);
7754 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7755 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7756 connector_type == DRM_MODE_CONNECTOR_eDP) {
7757 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7759 if (!aconnector->mst_port)
7760 drm_connector_attach_vrr_capable_property(&aconnector->base);
7762 #ifdef CONFIG_DRM_AMD_DC_HDCP
7763 if (adev->dm.hdcp_workqueue)
7764 drm_connector_attach_content_protection_property(&aconnector->base, true);
/*
 * i2c_algorithm.master_xfer: translate an array of i2c_msg into a DC
 * i2c_command and submit it over the link's DDC channel. Returns the
 * number of messages on success (return lines elided in this view).
 */
7769 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7770 struct i2c_msg *msgs, int num)
7772 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7773 struct ddc_service *ddc_service = i2c->ddc_service;
7774 struct i2c_command cmd;
7778 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7783 cmd.number_of_payloads = num;
7784 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
/* One payload per message; I2C_M_RD distinguishes reads from writes.
 * Payload data aliases the caller's buffers — no copy is made. */
7787 for (i = 0; i < num; i++) {
7788 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7789 cmd.payloads[i].address = msgs[i].addr;
7790 cmd.payloads[i].length = msgs[i].len;
7791 cmd.payloads[i].data = msgs[i].buf;
7795 ddc_service->ctx->dc,
7796 ddc_service->ddc_pin->hw_info.ddc_channel,
7800 kfree(cmd.payloads);
/* i2c_algorithm.functionality: plain I2C plus SMBus emulation. */
7804 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7806 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
/* I2C algorithm backing the per-link DDC adapters created below. */
7809 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7810 .master_xfer = amdgpu_dm_i2c_xfer,
7811 .functionality = amdgpu_dm_i2c_func,
/*
 * Allocate and configure an amdgpu_i2c_adapter bound to @ddc_service.
 * The adapter is not registered here — the caller does i2c_add_adapter().
 * Also records the link index as the DDC hardware channel.
 */
7814 static struct amdgpu_i2c_adapter *
7815 create_i2c(struct ddc_service *ddc_service,
7819 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7820 struct amdgpu_i2c_adapter *i2c;
7822 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7825 i2c->base.owner = THIS_MODULE;
7826 i2c->base.class = I2C_CLASS_DDC;
7827 i2c->base.dev.parent = &adev->pdev->dev;
7828 i2c->base.algo = &amdgpu_dm_i2c_algo;
7829 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
/* Allow amdgpu_dm_i2c_xfer() to recover the wrapper from the adapter. */
7830 i2c_set_adapdata(&i2c->base, i2c);
7831 i2c->ddc_service = ddc_service;
7832 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7839 * Note: this function assumes that dc_link_detect() was called for the
7840 * dc_link which will be represented by this aconnector.
/*
 * Create one DRM connector for @link_index: build and register the DDC
 * i2c adapter, init the connector with that DDC, attach helper funcs,
 * run the shared init helper, bind the encoder, and for DP/eDP set up
 * the DP (MST/aux) machinery. Error-unwind labels are elided here.
 */
7842 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7843 struct amdgpu_dm_connector *aconnector,
7844 uint32_t link_index,
7845 struct amdgpu_encoder *aencoder)
7849 struct dc *dc = dm->dc;
7850 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7851 struct amdgpu_i2c_adapter *i2c;
/* Back-pointer so DC code can reach the DRM connector. */
7853 link->priv = aconnector;
7855 DRM_DEBUG_DRIVER("%s()\n", __func__);
7857 i2c = create_i2c(link->ddc, link->link_index, &res);
7859 DRM_ERROR("Failed to create i2c adapter data\n");
7863 aconnector->i2c = i2c;
7864 res = i2c_add_adapter(&i2c->base);
7867 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7871 connector_type = to_drm_connector_type(link->connector_signal);
7873 res = drm_connector_init_with_ddc(
7876 &amdgpu_dm_connector_funcs,
/* -1 marks the connector as uninitialized on failure. */
7881 DRM_ERROR("connector_init failed\n");
7882 aconnector->connector_id = -1;
7886 drm_connector_helper_add(
7888 &amdgpu_dm_connector_helper_funcs);
7890 amdgpu_dm_connector_init_helper(
7897 drm_connector_attach_encoder(
7898 &aconnector->base, &aencoder->base);
7900 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7901 || connector_type == DRM_MODE_CONNECTOR_eDP)
7902 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7907 aconnector->i2c = NULL;
/* Derive the encoder's possible_crtcs bitmask from the number of CRTCs
 * (switch arms elided in this view — presumably 0x1/0x3/0x7/... per
 * crtc count; verify against the full source). */
7912 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7914 switch (adev->mode_info.num_crtc) {
/*
 * Register one encoder (TMDS type) that can drive any CRTC allowed by
 * amdgpu_dm_get_encoder_crtc_mask(), and attach the DM helper funcs.
 */
7931 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7932 struct amdgpu_encoder *aencoder,
7933 uint32_t link_index)
7935 struct amdgpu_device *adev = drm_to_adev(dev);
7937 int res = drm_encoder_init(dev,
7939 &amdgpu_dm_encoder_funcs,
7940 DRM_MODE_ENCODER_TMDS,
7943 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
/* encoder_id mirrors the link on success, -1 on init failure. */
7946 aencoder->encoder_id = link_index;
7948 aencoder->encoder_id = -1;
7950 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
/*
 * Enable or disable the per-CRTC interrupts (pageflip, vblank and —
 * when built — the secure-display vline IRQs). The enable/disable
 * branching and irq put/get calls are elided in this view.
 */
7955 static void manage_dm_interrupts(struct amdgpu_device *adev,
7956 struct amdgpu_crtc *acrtc,
7960 * We have no guarantee that the frontend index maps to the same
7961 * backend index - some even map to more than one.
7963 * TODO: Use a different interrupt or check DC itself for the mapping.
7966 amdgpu_display_crtc_idx_to_irq_type(
/* Enable path: vblank on before taking the pageflip IRQ reference. */
7971 drm_crtc_vblank_on(&acrtc->base);
7974 &adev->pageflip_irq,
7976 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7983 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Disable path: drop the pageflip IRQ reference, then vblank off. */
7991 &adev->pageflip_irq,
7993 drm_crtc_vblank_off(&acrtc->base);
/*
 * Re-apply the pageflip IRQ enable state for @acrtc to hardware, e.g.
 * after a reset may have clobbered the programmed IRQ registers.
 */
7997 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7998 struct amdgpu_crtc *acrtc)
8001 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8004 * This reads the current state for the IRQ and force reapplies
8005 * the setting to hardware.
8007 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
/*
 * Return true when the scaling or underscan configuration differs
 * between the new and old DM connector states. An underscan toggle
 * only counts when the active side has non-zero borders.
 */
8011 is_scaling_state_different(const struct dm_connector_state *dm_state,
8012 const struct dm_connector_state *old_dm_state)
8014 if (dm_state->scaling != old_dm_state->scaling)
/* Underscan turned off: only relevant if it had visible borders. */
8016 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8017 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
/* Underscan turned on: only relevant if it adds visible borders. */
8019 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8020 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8022 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8023 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8028 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP state machine must be (re)run for this
 * connector, normalizing state transitions along the way (see the
 * per-case comments). Returns true only for transitions that require
 * enabling/disabling HDCP; identical states return false.
 */
8029 static bool is_content_protection_different(struct drm_connector_state *state,
8030 const struct drm_connector_state *old_state,
8031 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8033 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8034 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8036 /* Handle: Type0/1 change */
8037 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8038 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
/* Content-type change forces renegotiation from DESIRED. */
8039 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8043 /* CP is being re enabled, ignore this
8045 * Handles: ENABLED -> DESIRED
8047 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8048 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8049 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8053 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8055 * Handles: UNDESIRED -> ENABLED
8057 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8058 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8059 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8061 /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
8062 * hot-plug, headless s3, dpms
8064 * Handles: DESIRED -> DESIRED (Special case)
8066 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8067 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
/* Consume the deferred-HDCP flag set at hotplug/dpms time. */
8068 dm_con_state->update_hdcp = false;
8073 * Handles: UNDESIRED -> UNDESIRED
8074 * DESIRED -> DESIRED
8075 * ENABLED -> ENABLED
8077 if (old_state->content_protection == state->content_protection)
8081 * Handles: UNDESIRED -> DESIRED
8082 * DESIRED -> UNDESIRED
8083 * ENABLED -> UNDESIRED
8085 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8089 * Handles: DESIRED -> ENABLED
/*
 * Detach a DC stream from its CRTC during a mode update: clear the
 * OTG assignment and mark the CRTC disabled.
 */
8095 static void remove_stream(struct amdgpu_device *adev,
8096 struct amdgpu_crtc *acrtc,
8097 struct dc_stream_state *stream)
8099 /* this is the update mode case */
8101 acrtc->otg_inst = -1;
8102 acrtc->enabled = false;
/*
 * Compute the DC cursor position/hotspot from the cursor plane state.
 * Negative on-screen coordinates are converted into a hotspot offset
 * so the cursor can partially overhang the top/left edges. Fills
 * @position (enable=false when fully off-screen) and returns 0, or a
 * negative errno on bad cursor dimensions (return lines elided).
 */
8105 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8106 struct dc_cursor_position *position)
8108 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8110 int xorigin = 0, yorigin = 0;
8112 if (!crtc || !plane->state->fb)
/* Reject cursors larger than the hardware maximum. */
8115 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8116 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8117 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8119 plane->state->crtc_w,
8120 plane->state->crtc_h);
8124 x = plane->state->crtc_x;
8125 y = plane->state->crtc_y;
/* Entirely off the top/left edge: leave position disabled. */
8127 if (x <= -amdgpu_crtc->max_cursor_width ||
8128 y <= -amdgpu_crtc->max_cursor_height)
/* Negative coords become hotspot offsets, clamped to the HW max. */
8132 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8136 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8139 position->enable = true;
8140 position->translate_by_source = true;
8143 position->x_hotspot = xorigin;
8144 position->y_hotspot = yorigin;
/*
 * Program the hardware cursor for a plane update: compute the position
 * (or disable the cursor when off-screen / no FB), then push cursor
 * attributes (address, size, format) and position to DC under dc_lock.
 */
8149 static void handle_cursor_update(struct drm_plane *plane,
8150 struct drm_plane_state *old_plane_state)
8152 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8153 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
/* On disable the new state has no CRTC; fall back to the old one. */
8154 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8155 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8156 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8157 uint64_t address = afb ? afb->address : 0;
8158 struct dc_cursor_position position = {0};
8159 struct dc_cursor_attributes attributes;
/* Neither old nor new state has an FB — nothing to do. */
8162 if (!plane->state->fb && !old_plane_state->fb)
8165 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8167 amdgpu_crtc->crtc_id,
8168 plane->state->crtc_w,
8169 plane->state->crtc_h);
8171 ret = get_cursor_position(plane, crtc, &position);
8175 if (!position.enable) {
8176 /* turn off cursor */
8177 if (crtc_state && crtc_state->stream) {
/* DC stream calls must be serialized via dc_lock. */
8178 mutex_lock(&adev->dm.dc_lock);
8179 dc_stream_set_cursor_position(crtc_state->stream,
8181 mutex_unlock(&adev->dm.dc_lock);
8186 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8187 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8189 memset(&attributes, 0, sizeof(attributes));
8190 attributes.address.high_part = upper_32_bits(address);
8191 attributes.address.low_part = lower_32_bits(address);
8192 attributes.width = plane->state->crtc_w;
8193 attributes.height = plane->state->crtc_h;
8194 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8195 attributes.rotation_angle = 0;
8196 attributes.attribute_flags.value = 0;
/* Pitch in pixels, derived from the FB's byte pitch and bpp. */
8198 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8200 if (crtc_state->stream) {
8201 mutex_lock(&adev->dm.dc_lock);
8202 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8204 DRM_ERROR("DC failed to set cursor attributes\n");
8206 if (!dc_stream_set_cursor_position(crtc_state->stream,
8208 DRM_ERROR("DC failed to set cursor position\n");
8209 mutex_unlock(&adev->dm.dc_lock);
/*
 * prepare_flip_isr() - Hand the pending pageflip event to the pflip IRQ.
 *
 * Moves the DRM vblank event out of the CRTC's atomic state into
 * acrtc->event and marks the flip as SUBMITTED, so the pageflip
 * interrupt handler (not the commit path) completes and delivers it.
 * Caller must hold dev->event_lock — asserted below.
 */
8213 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8216 assert_spin_locked(&acrtc->base.dev->event_lock);
/* A still-pending previous event here would mean a lost flip. */
8217 WARN_ON(acrtc->event);
8219 acrtc->event = acrtc->base.state->event;
8221 /* Set the flip status */
8222 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8224 /* Mark this event as consumed */
8225 acrtc->base.state->event = NULL;
8227 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
/*
 * update_freesync_state_on_stream() - Refresh VRR/FreeSync state for a
 * stream at flip time.
 *
 * @dm:                display manager owning the freesync module.
 * @new_crtc_state:    DM CRTC state being committed.
 * @new_stream:        DC stream the flip targets.
 * @surface:           plane being flipped (passed to the elided preflip call).
 * @flip_timestamp_in_us: timestamp of this flip, for VRR pacing.
 *
 * Under event_lock: runs the modular freesync pre-flip handling, rebuilds
 * the VRR infopacket, records whether timing or infopacket changed, and
 * mirrors the results into both the IRQ-handler copy
 * (acrtc->dm_irq_params) and the stream itself.
 */
8231 static void update_freesync_state_on_stream(
8232 struct amdgpu_display_manager *dm,
8233 struct dm_crtc_state *new_crtc_state,
8234 struct dc_stream_state *new_stream,
8235 struct dc_plane_state *surface,
8236 u32 flip_timestamp_in_us)
8238 struct mod_vrr_params vrr_params;
8239 struct dc_info_packet vrr_infopacket = {0};
8240 struct amdgpu_device *adev = dm->adev;
8241 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8242 unsigned long flags;
8243 bool pack_sdp_v1_3 = false;
8249 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8250 * For now it's sufficient to just guard against these conditions.
8253 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
/* event_lock guards dm_irq_params against the vblank/pflip IRQ handlers. */
8256 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8257 vrr_params = acrtc->dm_irq_params.vrr_params;
8260 mod_freesync_handle_preflip(
8261 dm->freesync_module,
8264 flip_timestamp_in_us,
/*
 * Pre-DCE/AI parts lack a vupdate interrupt, so BTR (below-the-range)
 * handling must be driven from here instead.
 */
8267 if (adev->family < AMDGPU_FAMILY_AI &&
8268 amdgpu_dm_vrr_active(new_crtc_state)) {
8269 mod_freesync_handle_v_update(dm->freesync_module,
8270 new_stream, &vrr_params);
8272 /* Need to call this before the frame ends. */
8273 dc_stream_adjust_vmin_vmax(dm->dc,
8274 new_crtc_state->stream,
8275 &vrr_params.adjust);
8279 mod_freesync_build_vrr_infopacket(
8280 dm->freesync_module,
8284 TRANSFER_FUNC_UNKNOWN,
/* Record change flags so the commit path knows to resend the packet. */
8288 new_crtc_state->freesync_timing_changed |=
8289 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8291 sizeof(vrr_params.adjust)) != 0);
8293 new_crtc_state->freesync_vrr_info_changed |=
8294 (memcmp(&new_crtc_state->vrr_infopacket,
8296 sizeof(vrr_infopacket)) != 0);
/* Publish to the IRQ-visible copy and to the stream itself. */
8298 acrtc->dm_irq_params.vrr_params = vrr_params;
8299 new_crtc_state->vrr_infopacket = vrr_infopacket;
8301 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8302 new_stream->vrr_infopacket = vrr_infopacket;
8304 if (new_crtc_state->freesync_vrr_info_changed)
8305 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8306 new_crtc_state->base.crtc->base.id,
8307 (int)new_crtc_state->base.vrr_enabled,
8308 (int)vrr_params.state);
8310 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * update_stream_irq_parameters() - Recompute VRR parameters and copy the
 * freesync state where the IRQ handlers can safely read it.
 *
 * @dm:             display manager.
 * @new_crtc_state: committed DM CRTC state carrying freesync_config.
 *
 * Decides the VRR state machine input (fixed / variable / inactive /
 * unsupported) from the CRTC's config and vrr_enabled flag, rebuilds the
 * vrr params via mod_freesync, then mirrors config, active plane count
 * and params into acrtc->dm_irq_params under event_lock.
 */
8313 static void update_stream_irq_parameters(
8314 struct amdgpu_display_manager *dm,
8315 struct dm_crtc_state *new_crtc_state)
8317 struct dc_stream_state *new_stream = new_crtc_state->stream;
8318 struct mod_vrr_params vrr_params;
8319 struct mod_freesync_config config = new_crtc_state->freesync_config;
8320 struct amdgpu_device *adev = dm->adev;
8321 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8322 unsigned long flags;
8328 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8329 * For now it's sufficient to just guard against these conditions.
8331 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8334 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8335 vrr_params = acrtc->dm_irq_params.vrr_params;
8337 if (new_crtc_state->vrr_supported &&
8338 config.min_refresh_in_uhz &&
8339 config.max_refresh_in_uhz) {
8341 * if freesync compatible mode was set, config.state will be set
/*
 * Fixed-rate freesync-compatible mode: keep the fixed refresh unless a
 * full modeset invalidates it.
 */
8344 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8345 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8346 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8347 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8348 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8349 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8350 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
/* Otherwise VRR is variable when userspace enabled it, else inactive. */
8352 config.state = new_crtc_state->base.vrr_enabled ?
8353 VRR_STATE_ACTIVE_VARIABLE :
8357 config.state = VRR_STATE_UNSUPPORTED;
8360 mod_freesync_build_vrr_params(dm->freesync_module,
8362 &config, &vrr_params);
8364 new_crtc_state->freesync_timing_changed |=
8365 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8366 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8368 new_crtc_state->freesync_config = config;
8369 /* Copy state for access from DM IRQ handler */
8370 acrtc->dm_irq_params.freesync_config = config;
8371 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8372 acrtc->dm_irq_params.vrr_params = vrr_params;
8373 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * amdgpu_dm_handle_vrr_transition() - Manage vblank/vupdate IRQ refs
 * across VRR on/off transitions.
 *
 * While VRR is active the vblank IRQ must stay enabled (a disable/
 * re-enable cycle would yield bogus timestamps inside the variable
 * front-porch), so a vblank reference is taken on off->on and dropped
 * on on->off, together with the matching vupdate IRQ toggle.
 */
8376 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8377 struct dm_crtc_state *new_state)
8379 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8380 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8382 if (!old_vrr_active && new_vrr_active) {
8383 /* Transition VRR inactive -> active:
8384 * While VRR is active, we must not disable vblank irq, as a
8385 * reenable after disable would compute bogus vblank/pflip
8386 * timestamps if it likely happened inside display front-porch.
8388 * We also need vupdate irq for the actual core vblank handling
8391 dm_set_vupdate_irq(new_state->base.crtc, true);
8392 drm_crtc_vblank_get(new_state->base.crtc);
8393 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8394 __func__, new_state->base.crtc->base.id);
8395 } else if (old_vrr_active && !new_vrr_active) {
8396 /* Transition VRR active -> inactive:
8397 * Allow vblank irq disable again for fixed refresh rate.
8399 dm_set_vupdate_irq(new_state->base.crtc, false);
8400 drm_crtc_vblank_put(new_state->base.crtc);
8401 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8402 __func__, new_state->base.crtc->base.id);
/*
 * amdgpu_dm_commit_cursors() - Push cursor updates for every cursor
 * plane in the atomic state.
 *
 * Iterates all planes in @state and dispatches cursor planes to
 * handle_cursor_update(); non-cursor planes are handled elsewhere in
 * the commit path.
 */
8406 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8408 struct drm_plane *plane;
8409 struct drm_plane_state *old_plane_state;
8413 * TODO: Make this per-stream so we don't issue redundant updates for
8414 * commits with multiple streams.
8416 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8417 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8418 handle_cursor_update(plane, old_plane_state);
/*
 * amdgpu_dm_commit_planes() - Program all plane updates for one CRTC.
 *
 * @state:           the atomic commit being applied.
 * @dc_state:        DC global state built during atomic check.
 * @dev:             DRM device.
 * @dm:              display manager.
 * @pcrtc:           the CRTC whose planes are being committed.
 * @wait_for_vblank: whether flips should be throttled to the next vblank.
 *
 * Builds a dc_surface_update bundle for every non-cursor plane attached
 * to @pcrtc, waits for FB fences, handles flip throttling (legacy vs VRR
 * pacing), arms the pageflip IRQ event, then commits the bundle to DC
 * under dm->dc_lock together with any stream-level updates (color mgmt,
 * abm, vmin/vmax, PSR enter/exit).  Cursor programming is done last so a
 * plane-disable commit doesn't leave a stale cursor.
 */
8421 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8422 struct dc_state *dc_state,
8423 struct drm_device *dev,
8424 struct amdgpu_display_manager *dm,
8425 struct drm_crtc *pcrtc,
8426 bool wait_for_vblank)
8429 uint64_t timestamp_ns;
8430 struct drm_plane *plane;
8431 struct drm_plane_state *old_plane_state, *new_plane_state;
8432 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8433 struct drm_crtc_state *new_pcrtc_state =
8434 drm_atomic_get_new_crtc_state(state, pcrtc);
8435 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8436 struct dm_crtc_state *dm_old_crtc_state =
8437 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8438 int planes_count = 0, vpos, hpos;
8440 unsigned long flags;
8441 struct amdgpu_bo *abo;
8442 uint32_t target_vblank, last_flip_vblank;
8443 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8444 bool pflip_present = false;
/* Per-plane scratch arrays, heap-allocated below to keep the stack small. */
8446 struct dc_surface_update surface_updates[MAX_SURFACES];
8447 struct dc_plane_info plane_infos[MAX_SURFACES];
8448 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8449 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8450 struct dc_stream_update stream_update;
8453 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8456 dm_error("Failed to allocate update bundle\n");
8461 * Disable the cursor first if we're disabling all the planes.
8462 * It'll remain on the screen after the planes are re-enabled
8465 if (acrtc_state->active_planes == 0)
8466 amdgpu_dm_commit_cursors(state);
8468 /* update planes when needed */
8469 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8470 struct drm_crtc *crtc = new_plane_state->crtc;
8471 struct drm_crtc_state *new_crtc_state;
8472 struct drm_framebuffer *fb = new_plane_state->fb;
8473 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8474 bool plane_needs_flip;
8475 struct dc_plane_state *dc_plane;
8476 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8478 /* Cursor plane is handled after stream updates */
8479 if (plane->type == DRM_PLANE_TYPE_CURSOR)
/* Skip planes not attached to this CRTC or without an FB. */
8482 if (!fb || !crtc || pcrtc != crtc)
8485 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8486 if (!new_crtc_state->active)
8489 dc_plane = dm_new_plane_state->dc_state;
8491 bundle->surface_updates[planes_count].surface = dc_plane;
8492 if (new_pcrtc_state->color_mgmt_changed) {
8493 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8494 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8495 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8498 fill_dc_scaling_info(new_plane_state,
8499 &bundle->scaling_infos[planes_count]);
8501 bundle->surface_updates[planes_count].scaling_info =
8502 &bundle->scaling_infos[planes_count];
/* A flip needs both an old and a new FB; otherwise it is enable/disable. */
8504 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8506 pflip_present = pflip_present || plane_needs_flip;
8508 if (!plane_needs_flip) {
8513 abo = gem_to_amdgpu_bo(fb->obj[0]);
8516 * Wait for all fences on this FB. Do limited wait to avoid
8517 * deadlock during GPU reset when this fence will not signal
8518 * but we hold reservation lock for the BO.
8520 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8521 msecs_to_jiffies(5000));
8522 if (unlikely(r <= 0))
8523 DRM_ERROR("Waiting for fences timed out!");
8525 fill_dc_plane_info_and_addr(
8526 dm->adev, new_plane_state,
8528 &bundle->plane_infos[planes_count],
8529 &bundle->flip_addrs[planes_count].address,
8530 afb->tmz_surface, false);
8532 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8533 new_plane_state->plane->index,
8534 bundle->plane_infos[planes_count].dcc.enable);
8536 bundle->surface_updates[planes_count].plane_info =
8537 &bundle->plane_infos[planes_count];
8540 * Only allow immediate flips for fast updates that don't
8541 * change FB pitch, DCC state, rotation or mirroing.
8543 bundle->flip_addrs[planes_count].flip_immediate =
8544 crtc->state->async_flip &&
8545 acrtc_state->update_type == UPDATE_TYPE_FAST;
8547 timestamp_ns = ktime_get_ns();
8548 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8549 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8550 bundle->surface_updates[planes_count].surface = dc_plane;
8552 if (!bundle->surface_updates[planes_count].surface) {
8553 DRM_ERROR("No surface for CRTC: id=%d\n",
8554 acrtc_attach->crtc_id);
/* Freesync state is tracked per-stream; only the primary drives it. */
8558 if (plane == pcrtc->primary)
8559 update_freesync_state_on_stream(
8562 acrtc_state->stream,
8564 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8566 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8568 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8569 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8575 if (pflip_present) {
8577 /* Use old throttling in non-vrr fixed refresh rate mode
8578 * to keep flip scheduling based on target vblank counts
8579 * working in a backwards compatible way, e.g., for
8580 * clients using the GLX_OML_sync_control extension or
8581 * DRI3/Present extension with defined target_msc.
8583 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8586 /* For variable refresh rate mode only:
8587 * Get vblank of last completed flip to avoid > 1 vrr
8588 * flips per video frame by use of throttling, but allow
8589 * flip programming anywhere in the possibly large
8590 * variable vrr vblank interval for fine-grained flip
8591 * timing control and more opportunity to avoid stutter
8592 * on late submission of flips.
8594 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8595 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8596 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8599 target_vblank = last_flip_vblank + wait_for_vblank;
8602 * Wait until we're out of the vertical blank period before the one
8603 * targeted by the flip
8605 while ((acrtc_attach->enabled &&
8606 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8607 0, &vpos, &hpos, NULL,
8608 NULL, &pcrtc->hwmode)
8609 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8610 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8611 (int)(target_vblank -
8612 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8613 usleep_range(1000, 1100);
8617 * Prepare the flip event for the pageflip interrupt to handle.
8619 * This only works in the case where we've already turned on the
8620 * appropriate hardware blocks (eg. HUBP) so in the transition case
8621 * from 0 -> n planes we have to skip a hardware generated event
8622 * and rely on sending it from software.
8624 if (acrtc_attach->base.state->event &&
8625 acrtc_state->active_planes > 0) {
8626 drm_crtc_vblank_get(pcrtc);
8628 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8630 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8631 prepare_flip_isr(acrtc_attach);
8633 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8636 if (acrtc_state->stream) {
8637 if (acrtc_state->freesync_vrr_info_changed)
8638 bundle->stream_update.vrr_infopacket =
8639 &acrtc_state->stream->vrr_infopacket;
8643 /* Update the planes if changed or disable if we don't have any. */
8644 if ((planes_count || acrtc_state->active_planes == 0) &&
8645 acrtc_state->stream) {
8646 #if defined(CONFIG_DRM_AMD_DC_DCN)
8648 * If PSR or idle optimizations are enabled then flush out
8649 * any pending work before hardware programming.
8651 flush_workqueue(dm->vblank_control_workqueue);
8654 bundle->stream_update.stream = acrtc_state->stream;
8655 if (new_pcrtc_state->mode_changed) {
8656 bundle->stream_update.src = acrtc_state->stream->src;
8657 bundle->stream_update.dst = acrtc_state->stream->dst;
8660 if (new_pcrtc_state->color_mgmt_changed) {
8662 * TODO: This isn't fully correct since we've actually
8663 * already modified the stream in place.
8665 bundle->stream_update.gamut_remap =
8666 &acrtc_state->stream->gamut_remap_matrix;
8667 bundle->stream_update.output_csc_transform =
8668 &acrtc_state->stream->csc_color_matrix;
8669 bundle->stream_update.out_transfer_func =
8670 acrtc_state->stream->out_transfer_func;
8673 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8674 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8675 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8678 * If FreeSync state on the stream has changed then we need to
8679 * re-adjust the min/max bounds now that DC doesn't handle this
8680 * as part of commit.
8682 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8683 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8684 dc_stream_adjust_vmin_vmax(
8685 dm->dc, acrtc_state->stream,
8686 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8687 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8689 mutex_lock(&dm->dc_lock);
/* Non-fast updates must exit PSR before programming the hardware. */
8690 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8691 acrtc_state->stream->link->psr_settings.psr_allow_active)
8692 amdgpu_dm_psr_disable(acrtc_state->stream);
8694 dc_commit_updates_for_stream(dm->dc,
8695 bundle->surface_updates,
8697 acrtc_state->stream,
8698 &bundle->stream_update,
8702 * Enable or disable the interrupts on the backend.
8704 * Most pipes are put into power gating when unused.
8706 * When power gating is enabled on a pipe we lose the
8707 * interrupt enablement state when power gating is disabled.
8709 * So we need to update the IRQ control state in hardware
8710 * whenever the pipe turns on (since it could be previously
8711 * power gated) or off (since some pipes can't be power gated
8714 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8715 dm_update_pflip_irq_state(drm_to_adev(dev),
8718 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8719 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8720 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8721 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8723 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
8724 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8725 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8726 struct amdgpu_dm_connector *aconn =
8727 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8729 if (aconn->psr_skip_count > 0)
8730 aconn->psr_skip_count--;
8732 /* Allow PSR when skip count is 0. */
8733 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8735 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8738 mutex_unlock(&dm->dc_lock);
8742 * Update cursor state *after* programming all the planes.
8743 * This avoids redundant programming in the case where we're going
8744 * to be disabling a single plane - those pipes are being disabled.
8746 if (acrtc_state->active_planes)
8747 amdgpu_dm_commit_cursors(state);
/*
 * amdgpu_dm_commit_audio() - Notify the audio driver about connector
 * changes in an atomic commit.
 *
 * First pass: connectors whose CRTC changed (including disconnects) get
 * their audio instance invalidated (-1) and a removal notification.
 * Second pass: connectors on modeset CRTCs with an active stream pick up
 * the audio instance from the DC stream status and get an addition
 * notification.  aconnector->audio_inst is updated under audio_lock.
 */
8753 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8754 struct drm_atomic_state *state)
8756 struct amdgpu_device *adev = drm_to_adev(dev);
8757 struct amdgpu_dm_connector *aconnector;
8758 struct drm_connector *connector;
8759 struct drm_connector_state *old_con_state, *new_con_state;
8760 struct drm_crtc_state *new_crtc_state;
8761 struct dm_crtc_state *new_dm_crtc_state;
8762 const struct dc_stream_status *status;
8765 /* Notify device removals. */
8766 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8767 if (old_con_state->crtc != new_con_state->crtc) {
8768 /* CRTC changes require notification. */
8772 if (!new_con_state->crtc)
8775 new_crtc_state = drm_atomic_get_new_crtc_state(
8776 state, new_con_state->crtc);
8778 if (!new_crtc_state)
8781 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8785 aconnector = to_amdgpu_dm_connector(connector);
8787 mutex_lock(&adev->dm.audio_lock);
8788 inst = aconnector->audio_inst;
/* -1 marks "no audio endpoint" until the add pass below re-assigns it. */
8789 aconnector->audio_inst = -1;
8790 mutex_unlock(&adev->dm.audio_lock);
8792 amdgpu_dm_audio_eld_notify(adev, inst);
8795 /* Notify audio device additions. */
8796 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8797 if (!new_con_state->crtc)
8800 new_crtc_state = drm_atomic_get_new_crtc_state(
8801 state, new_con_state->crtc);
8803 if (!new_crtc_state)
8806 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8809 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8810 if (!new_dm_crtc_state->stream)
8813 status = dc_stream_get_status(new_dm_crtc_state->stream);
8817 aconnector = to_amdgpu_dm_connector(connector);
8819 mutex_lock(&adev->dm.audio_lock);
8820 inst = status->audio_inst;
8821 aconnector->audio_inst = inst;
8822 mutex_unlock(&adev->dm.audio_lock);
8824 amdgpu_dm_audio_eld_notify(adev, inst);
8829 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8830 * @crtc_state: the DRM CRTC state
8831 * @stream_state: the DC stream state.
8833 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8834 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8836 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8837 struct dc_stream_state *stream_state)
/* mode_changed tells DC whether full stream reprogramming is required. */
8839 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8843 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8844 * @state: The atomic state to commit
8846 * This will tell DC to commit the constructed DC state from atomic_check,
8847 * programming the hardware. Any failures here implies a hardware failure, since
8848 * atomic check should have filtered anything non-kosher.
8850 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8852 struct drm_device *dev = state->dev;
8853 struct amdgpu_device *adev = drm_to_adev(dev);
8854 struct amdgpu_display_manager *dm = &adev->dm;
8855 struct dm_atomic_state *dm_state;
8856 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8858 struct drm_crtc *crtc;
8859 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8860 unsigned long flags;
8861 bool wait_for_vblank = true;
8862 struct drm_connector *connector;
8863 struct drm_connector_state *old_con_state, *new_con_state;
8864 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8865 int crtc_disable_count = 0;
8866 bool mode_set_reset_required = false;
8868 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8870 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8872 dm_state = dm_atomic_get_new_state(state);
8873 if (dm_state && dm_state->context) {
8874 dc_state = dm_state->context;
8876 /* No state changes, retain current state. */
8877 dc_state_temp = dc_create_state(dm->dc);
8878 ASSERT(dc_state_temp);
8879 dc_state = dc_state_temp;
8880 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8883 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8884 new_crtc_state, i) {
8885 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8887 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8889 if (old_crtc_state->active &&
8890 (!new_crtc_state->active ||
8891 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8892 manage_dm_interrupts(adev, acrtc, false);
8893 dc_stream_release(dm_old_crtc_state->stream);
8897 drm_atomic_helper_calc_timestamping_constants(state);
8899 /* update changed items */
8900 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8901 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8903 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8904 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8907 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8908 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8909 "connectors_changed:%d\n",
8911 new_crtc_state->enable,
8912 new_crtc_state->active,
8913 new_crtc_state->planes_changed,
8914 new_crtc_state->mode_changed,
8915 new_crtc_state->active_changed,
8916 new_crtc_state->connectors_changed);
8918 /* Disable cursor if disabling crtc */
8919 if (old_crtc_state->active && !new_crtc_state->active) {
8920 struct dc_cursor_position position;
8922 memset(&position, 0, sizeof(position));
8923 mutex_lock(&dm->dc_lock);
8924 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8925 mutex_unlock(&dm->dc_lock);
8928 /* Copy all transient state flags into dc state */
8929 if (dm_new_crtc_state->stream) {
8930 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8931 dm_new_crtc_state->stream);
8934 /* handles headless hotplug case, updating new_state and
8935 * aconnector as needed
8938 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8940 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8942 if (!dm_new_crtc_state->stream) {
8944 * this could happen because of issues with
8945 * userspace notifications delivery.
8946 * In this case userspace tries to set mode on
8947 * display which is disconnected in fact.
8948 * dc_sink is NULL in this case on aconnector.
8949 * We expect reset mode will come soon.
8951 * This can also happen when unplug is done
8952 * during resume sequence ended
8954 * In this case, we want to pretend we still
8955 * have a sink to keep the pipe running so that
8956 * hw state is consistent with the sw state
8958 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8959 __func__, acrtc->base.base.id);
8963 if (dm_old_crtc_state->stream)
8964 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8966 pm_runtime_get_noresume(dev->dev);
8968 acrtc->enabled = true;
8969 acrtc->hw_mode = new_crtc_state->mode;
8970 crtc->hwmode = new_crtc_state->mode;
8971 mode_set_reset_required = true;
8972 } else if (modereset_required(new_crtc_state)) {
8973 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8974 /* i.e. reset mode */
8975 if (dm_old_crtc_state->stream)
8976 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8978 mode_set_reset_required = true;
8980 } /* for_each_crtc_in_state() */
8983 /* if there mode set or reset, disable eDP PSR */
8984 if (mode_set_reset_required) {
8985 #if defined(CONFIG_DRM_AMD_DC_DCN)
8986 flush_workqueue(dm->vblank_control_workqueue);
8988 amdgpu_dm_psr_disable_all(dm);
8991 dm_enable_per_frame_crtc_master_sync(dc_state);
8992 mutex_lock(&dm->dc_lock);
8993 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8994 #if defined(CONFIG_DRM_AMD_DC_DCN)
8995 /* Allow idle optimization when vblank count is 0 for display off */
8996 if (dm->active_vblank_irq_count == 0)
8997 dc_allow_idle_optimizations(dm->dc,true);
8999 mutex_unlock(&dm->dc_lock);
9002 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9003 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9005 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9007 if (dm_new_crtc_state->stream != NULL) {
9008 const struct dc_stream_status *status =
9009 dc_stream_get_status(dm_new_crtc_state->stream);
9012 status = dc_stream_get_status_from_state(dc_state,
9013 dm_new_crtc_state->stream);
9015 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9017 acrtc->otg_inst = status->primary_otg_inst;
9020 #ifdef CONFIG_DRM_AMD_DC_HDCP
9021 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9022 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9023 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9024 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9026 new_crtc_state = NULL;
9029 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9031 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9033 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9034 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9035 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9036 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9037 dm_new_con_state->update_hdcp = true;
9041 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9042 hdcp_update_display(
9043 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9044 new_con_state->hdcp_content_type,
9045 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9049 /* Handle connector state changes */
9050 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9051 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9052 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9053 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9054 struct dc_surface_update dummy_updates[MAX_SURFACES];
9055 struct dc_stream_update stream_update;
9056 struct dc_info_packet hdr_packet;
9057 struct dc_stream_status *status = NULL;
9058 bool abm_changed, hdr_changed, scaling_changed;
9060 memset(&dummy_updates, 0, sizeof(dummy_updates));
9061 memset(&stream_update, 0, sizeof(stream_update));
9064 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9065 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9068 /* Skip any modesets/resets */
9069 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9072 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9073 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9075 scaling_changed = is_scaling_state_different(dm_new_con_state,
9078 abm_changed = dm_new_crtc_state->abm_level !=
9079 dm_old_crtc_state->abm_level;
9082 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9084 if (!scaling_changed && !abm_changed && !hdr_changed)
9087 stream_update.stream = dm_new_crtc_state->stream;
9088 if (scaling_changed) {
9089 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9090 dm_new_con_state, dm_new_crtc_state->stream);
9092 stream_update.src = dm_new_crtc_state->stream->src;
9093 stream_update.dst = dm_new_crtc_state->stream->dst;
9097 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9099 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9103 fill_hdr_info_packet(new_con_state, &hdr_packet);
9104 stream_update.hdr_static_metadata = &hdr_packet;
9107 status = dc_stream_get_status(dm_new_crtc_state->stream);
9109 if (WARN_ON(!status))
9112 WARN_ON(!status->plane_count);
9115 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9116 * Here we create an empty update on each plane.
9117 * To fix this, DC should permit updating only stream properties.
9119 for (j = 0; j < status->plane_count; j++)
9120 dummy_updates[j].surface = status->plane_states[0];
9123 mutex_lock(&dm->dc_lock);
9124 dc_commit_updates_for_stream(dm->dc,
9126 status->plane_count,
9127 dm_new_crtc_state->stream,
9130 mutex_unlock(&dm->dc_lock);
9133 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9134 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9135 new_crtc_state, i) {
9136 if (old_crtc_state->active && !new_crtc_state->active)
9137 crtc_disable_count++;
9139 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9140 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9142 /* For freesync config update on crtc state and params for irq */
9143 update_stream_irq_parameters(dm, dm_new_crtc_state);
9145 /* Handle vrr on->off / off->on transitions */
9146 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9151 * Enable interrupts for CRTCs that are newly enabled or went through
9152 * a modeset. It was intentionally deferred until after the front end
9153 * state was modified to wait until the OTG was on and so the IRQ
9154 * handlers didn't access stale or invalid state.
9156 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9157 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9158 #ifdef CONFIG_DEBUG_FS
9159 bool configure_crc = false;
9160 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9161 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9162 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9164 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9165 cur_crc_src = acrtc->dm_irq_params.crc_src;
9166 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9168 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9170 if (new_crtc_state->active &&
9171 (!old_crtc_state->active ||
9172 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9173 dc_stream_retain(dm_new_crtc_state->stream);
9174 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9175 manage_dm_interrupts(adev, acrtc, true);
9177 #ifdef CONFIG_DEBUG_FS
9179 * Frontend may have changed so reapply the CRC capture
9180 * settings for the stream.
9182 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9184 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9185 configure_crc = true;
9186 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9187 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9188 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9189 acrtc->dm_irq_params.crc_window.update_win = true;
9190 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9191 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9192 crc_rd_wrk->crtc = crtc;
9193 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9194 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9200 if (amdgpu_dm_crtc_configure_crc_source(
9201 crtc, dm_new_crtc_state, cur_crc_src))
9202 DRM_DEBUG_DRIVER("Failed to configure crc source");
9207 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9208 if (new_crtc_state->async_flip)
9209 wait_for_vblank = false;
9211 /* update planes when needed per crtc*/
9212 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9213 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9215 if (dm_new_crtc_state->stream)
9216 amdgpu_dm_commit_planes(state, dc_state, dev,
9217 dm, crtc, wait_for_vblank);
9220 /* Update audio instances for each connector. */
9221 amdgpu_dm_commit_audio(dev, state);
9223 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9224 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9225 /* restore the backlight level */
9226 for (i = 0; i < dm->num_of_edps; i++) {
9227 if (dm->backlight_dev[i] &&
9228 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9229 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9233 * send vblank event on all events not handled in flip and
9234 * mark consumed event for drm_atomic_helper_commit_hw_done
9236 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9237 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9239 if (new_crtc_state->event)
9240 drm_send_event_locked(dev, &new_crtc_state->event->base);
9242 new_crtc_state->event = NULL;
9244 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9246 /* Signal HW programming completion */
9247 drm_atomic_helper_commit_hw_done(state);
9249 if (wait_for_vblank)
9250 drm_atomic_helper_wait_for_flip_done(dev, state);
9252 drm_atomic_helper_cleanup_planes(dev, state);
9254 /* return the stolen vga memory back to VRAM */
9255 if (!adev->mman.keep_stolen_vga_memory)
9256 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9257 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9260 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9261 * so we can put the GPU into runtime suspend if we're not driving any
9264 for (i = 0; i < crtc_disable_count; i++)
9265 pm_runtime_put_autosuspend(dev->dev);
9266 pm_runtime_mark_last_busy(dev->dev);
9269 dc_release_state(dc_state_temp);
/*
 * dm_force_atomic_commit - build and commit a minimal atomic state that
 * forces a modeset on the CRTC currently bound to @connector, restoring
 * the previous display configuration without userspace involvement.
 *
 * NOTE(review): this excerpt is missing lines (error-path checks, blank
 * lines, closing brace); comments below cover only the visible code.
 */
9273 static int dm_force_atomic_commit(struct drm_connector *connector)
9276 struct drm_device *ddev = connector->dev;
9277 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9278 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9279 struct drm_plane *plane = disconnected_acrtc->base.primary;
9280 struct drm_connector_state *conn_state;
9281 struct drm_crtc_state *crtc_state;
9282 struct drm_plane_state *plane_state;
/* Reuse the caller's already-held modeset acquire context. */
9287 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9289 /* Construct an atomic state to restore previous display setting */
9292 * Attach connectors to drm_atomic_state
9294 conn_state = drm_atomic_get_connector_state(state, connector);
9296 ret = PTR_ERR_OR_ZERO(conn_state);
9300 /* Attach crtc to drm_atomic_state*/
9301 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9303 ret = PTR_ERR_OR_ZERO(crtc_state);
9307 /* force a restore */
9308 crtc_state->mode_changed = true;
9310 /* Attach plane to drm_atomic_state */
9311 plane_state = drm_atomic_get_plane_state(state, plane);
9313 ret = PTR_ERR_OR_ZERO(plane_state);
9317 /* Call commit internally with the state we just constructed */
9318 ret = drm_atomic_commit(state);
/* Drop the state reference regardless of the commit outcome. */
9321 drm_atomic_state_put(state);
9323 DRM_ERROR("Restoring old state failed with %i\n", ret);
9329 * This function handles all cases when set mode does not come upon hotplug.
9330 * This includes when a display is unplugged then plugged back into the
9331 * same port and when running without usermode desktop manager support
/*
 * dm_restore_drm_connector_state - re-light @connector's display if the DC
 * stream's sink no longer matches the connector's current dc_sink (i.e. the
 * display was replugged and userspace will not issue a modeset itself).
 */
9333 void dm_restore_drm_connector_state(struct drm_device *dev,
9334 struct drm_connector *connector)
9336 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9337 struct amdgpu_crtc *disconnected_acrtc;
9338 struct dm_crtc_state *acrtc_state;
/* Nothing to restore without a sink, connector state, or an encoder. */
9340 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9343 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9344 if (!disconnected_acrtc)
9347 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9348 if (!acrtc_state->stream)
9352 * If the previous sink is not released and different from the current,
9353 * we deduce we are in a state where we can not rely on usermode call
9354 * to turn on the display, so we do it here
9356 if (acrtc_state->stream->sink != aconnector->dc_sink)
9357 dm_force_atomic_commit(&aconnector->base);
9361 * Grabs all modesetting locks to serialize against any blocking commits,
9362 * Waits for completion of all non blocking commits.
/*
 * do_aquire_global_lock - take every modeset lock and wait (up to 10s each)
 * for any outstanding commit's hw_done/flip_done on every CRTC, serializing
 * a full-update commit against all in-flight work.
 *
 * Returns 0 on success or a negative errno (interrupted wait / lock failure).
 */
9364 static int do_aquire_global_lock(struct drm_device *dev,
9365 struct drm_atomic_state *state)
9367 struct drm_crtc *crtc;
9368 struct drm_crtc_commit *commit;
9372 * Adding all modeset locks to acquire_ctx will
9373 * ensure that when the framework release it the
9374 * extra locks we are locking here will get released to
9376 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9380 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Peek the newest commit under the commit lock and pin it. */
9381 spin_lock(&crtc->commit_lock);
9382 commit = list_first_entry_or_null(&crtc->commit_list,
9383 struct drm_crtc_commit, commit_entry);
9385 drm_crtc_commit_get(commit);
9386 spin_unlock(&crtc->commit_lock);
9392 * Make sure all pending HW programming completed and
9395 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9398 ret = wait_for_completion_interruptible_timeout(
9399 &commit->flip_done, 10*HZ);
9402 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9403 "timed out\n", crtc->base.id, crtc->name);
9405 drm_crtc_commit_put(commit);
/* Timeout (ret == 0) is only logged above; report errors, else success. */
9408 return ret < 0 ? ret : 0;
/*
 * get_freesync_config_for_crtc - derive the CRTC's mod_freesync_config from
 * the connector's VRR capabilities and the current mode's refresh rate, and
 * store it in new_crtc_state->freesync_config.
 */
9411 static void get_freesync_config_for_crtc(
9412 struct dm_crtc_state *new_crtc_state,
9413 struct dm_connector_state *new_con_state)
9415 struct mod_freesync_config config = {0};
9416 struct amdgpu_dm_connector *aconnector =
9417 to_amdgpu_dm_connector(new_con_state->base.connector);
9418 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9419 int vrefresh = drm_mode_vrefresh(mode);
9420 bool fs_vid_mode = false;
/* VRR is usable only if the sink is capable and vrefresh is in range. */
9422 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9423 vrefresh >= aconnector->min_vfreq &&
9424 vrefresh <= aconnector->max_vfreq;
9426 if (new_crtc_state->vrr_supported) {
9427 new_crtc_state->stream->ignore_msa_timing_param = true;
9428 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
/* Refresh limits are tracked in micro-Hz (Hz * 1000000). */
9430 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9431 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9432 config.vsif_supported = true;
/* Fixed-rate freesync video mode keeps the previously computed rate. */
9436 config.state = VRR_STATE_ACTIVE_FIXED;
9437 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9439 } else if (new_crtc_state->base.vrr_enabled) {
9440 config.state = VRR_STATE_ACTIVE_VARIABLE;
9442 config.state = VRR_STATE_INACTIVE;
9446 new_crtc_state->freesync_config = config;
/*
 * reset_freesync_config_for_crtc - clear VRR support and the cached VRR
 * infopacket; used when the CRTC's stream is removed.
 */
9449 static void reset_freesync_config_for_crtc(
9450 struct dm_crtc_state *new_crtc_state)
9452 new_crtc_state->vrr_supported = false;
9454 memset(&new_crtc_state->vrr_infopacket, 0,
9455 sizeof(new_crtc_state->vrr_infopacket));
/*
 * is_timing_unchanged_for_freesync - true when old and new modes differ only
 * in vertical front porch: all horizontal parameters, clock and vsync width
 * must match, while vtotal/vsync_start/vsync_end are expected to differ
 * (the != comparisons are intentional — only a front-porch-style change
 * qualifies, which lets freesync avoid a full modeset).
 */
9459 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9460 struct drm_crtc_state *new_crtc_state)
9462 struct drm_display_mode old_mode, new_mode;
9464 if (!old_crtc_state || !new_crtc_state)
9467 old_mode = old_crtc_state->mode;
9468 new_mode = new_crtc_state->mode;
9470 if (old_mode.clock == new_mode.clock &&
9471 old_mode.hdisplay == new_mode.hdisplay &&
9472 old_mode.vdisplay == new_mode.vdisplay &&
9473 old_mode.htotal == new_mode.htotal &&
9474 old_mode.vtotal != new_mode.vtotal &&
9475 old_mode.hsync_start == new_mode.hsync_start &&
9476 old_mode.vsync_start != new_mode.vsync_start &&
9477 old_mode.hsync_end == new_mode.hsync_end &&
9478 old_mode.vsync_end != new_mode.vsync_end &&
9479 old_mode.hskew == new_mode.hskew &&
9480 old_mode.vscan == new_mode.vscan &&
9481 (old_mode.vsync_end - old_mode.vsync_start) ==
9482 (new_mode.vsync_end - new_mode.vsync_start))
/*
 * set_freesync_fixed_config - switch the CRTC to fixed-rate VRR and compute
 * the fixed refresh rate in micro-Hz from the mode timing:
 * (pixel clock in Hz * 1e6) / (htotal * vtotal).
 */
9488 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9489 uint64_t num, den, res;
9490 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9492 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
/* mode.clock is in kHz: * 1000 -> Hz, * 1000000 -> micro-Hz numerator. */
9494 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9495 den = (unsigned long long)new_crtc_state->mode.htotal *
9496 (unsigned long long)new_crtc_state->mode.vtotal;
/* 64-bit division helper; plain '/' on u64 is not portable in-kernel. */
9498 res = div_u64(num, den);
9499 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
/*
 * dm_update_crtc_state - atomic-check helper that removes the CRTC's DC
 * stream (enable == false) or creates/adds a new one (enable == true), and
 * applies non-modeset stream updates (scaling, ABM, color mgmt, freesync).
 * Sets *lock_and_validation_needed when the global DC context changed.
 *
 * NOTE(review): this excerpt is missing lines (returns, error checks,
 * braces); comments describe only the visible code.
 */
9502 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9503 struct drm_atomic_state *state,
9504 struct drm_crtc *crtc,
9505 struct drm_crtc_state *old_crtc_state,
9506 struct drm_crtc_state *new_crtc_state,
9508 bool *lock_and_validation_needed)
9510 struct dm_atomic_state *dm_state = NULL;
9511 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9512 struct dc_stream_state *new_stream;
9516 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9517 * update changed items
9519 struct amdgpu_crtc *acrtc = NULL;
9520 struct amdgpu_dm_connector *aconnector = NULL;
9521 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9522 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9526 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9527 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9528 acrtc = to_amdgpu_crtc(crtc);
9529 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9531 /* TODO This hack should go away */
9532 if (aconnector && enable) {
9533 /* Make sure fake sink is created in plug-in scenario */
9534 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9536 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9539 if (IS_ERR(drm_new_conn_state)) {
9540 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9544 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9545 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9547 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
/* Build a candidate stream for the new mode on this sink. */
9550 new_stream = create_validate_stream_for_sink(aconnector,
9551 &new_crtc_state->mode,
9553 dm_old_crtc_state->stream);
9556 * we can have no stream on ACTION_SET if a display
9557 * was disconnected during S3, in this case it is not an
9558 * error, the OS will be updated after detection, and
9559 * will do the right thing on next atomic commit
9563 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9564 __func__, acrtc->base.base.id);
9570 * TODO: Check VSDB bits to decide whether this should
9571 * be enabled or not.
9573 new_stream->triggered_crtc_reset.enabled =
9574 dm->force_timing_sync;
9576 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9578 ret = fill_hdr_info_packet(drm_new_conn_state,
9579 &new_stream->hdr_static_metadata);
9584 * If we already removed the old stream from the context
9585 * (and set the new stream to NULL) then we can't reuse
9586 * the old stream even if the stream and scaling are unchanged.
9587 * We'll hit the BUG_ON and black screen.
9589 * TODO: Refactor this function to allow this check to work
9590 * in all conditions.
9592 if (amdgpu_freesync_vid_mode &&
9593 dm_new_crtc_state->stream &&
9594 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
/* If the stream and its scaling are unchanged, downgrade to fast update. */
9597 if (dm_new_crtc_state->stream &&
9598 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9599 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9600 new_crtc_state->mode_changed = false;
9601 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9602 new_crtc_state->mode_changed);
9606 /* mode_changed flag may get updated above, need to check again */
9607 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9611 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9612 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9613 "connectors_changed:%d\n",
9615 new_crtc_state->enable,
9616 new_crtc_state->active,
9617 new_crtc_state->planes_changed,
9618 new_crtc_state->mode_changed,
9619 new_crtc_state->active_changed,
9620 new_crtc_state->connectors_changed);
9622 /* Remove stream for any changed/disabled CRTC */
9625 if (!dm_old_crtc_state->stream)
/* Front-porch-only change: keep the stream, go to fixed-rate freesync. */
9628 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9629 is_timing_unchanged_for_freesync(new_crtc_state,
9631 new_crtc_state->mode_changed = false;
9633 "Mode change not required for front porch change, "
9634 "setting mode_changed to %d",
9635 new_crtc_state->mode_changed);
9637 set_freesync_fixed_config(dm_new_crtc_state);
9640 } else if (amdgpu_freesync_vid_mode && aconnector &&
9641 is_freesync_video_mode(&new_crtc_state->mode,
9643 struct drm_display_mode *high_mode;
9645 high_mode = get_highest_refresh_rate_mode(aconnector, false);
9646 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
9647 set_freesync_fixed_config(dm_new_crtc_state);
9651 ret = dm_atomic_get_state(state, &dm_state);
9655 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9658 /* i.e. reset mode */
9659 if (dc_remove_stream_from_ctx(
9662 dm_old_crtc_state->stream) != DC_OK) {
/* Drop our reference; the CRTC no longer owns a stream. */
9667 dc_stream_release(dm_old_crtc_state->stream);
9668 dm_new_crtc_state->stream = NULL;
9670 reset_freesync_config_for_crtc(dm_new_crtc_state);
9672 *lock_and_validation_needed = true;
9674 } else {/* Add stream for any updated/enabled CRTC */
9676 * Quick fix to prevent NULL pointer on new_stream when
9677 * added MST connectors not found in existing crtc_state in the chained mode
9678 * TODO: need to dig out the root cause of that
9680 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9683 if (modereset_required(new_crtc_state))
9686 if (modeset_required(new_crtc_state, new_stream,
9687 dm_old_crtc_state->stream)) {
9689 WARN_ON(dm_new_crtc_state->stream);
9691 ret = dm_atomic_get_state(state, &dm_state);
/* Take ownership of the candidate stream and add it to the DC context. */
9695 dm_new_crtc_state->stream = new_stream;
9697 dc_stream_retain(new_stream);
9699 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9702 if (dc_add_stream_to_ctx(
9705 dm_new_crtc_state->stream) != DC_OK) {
9710 *lock_and_validation_needed = true;
9715 /* Release extra reference */
9717 dc_stream_release(new_stream);
9720 * We want to do dc stream updates that do not require a
9721 * full modeset below.
9723 if (!(enable && aconnector && new_crtc_state->active))
9726 * Given above conditions, the dc state cannot be NULL because:
9727 * 1. We're in the process of enabling CRTCs (just been added
9728 * to the dc context, or already is on the context)
9729 * 2. Has a valid connector attached, and
9730 * 3. Is currently active and enabled.
9731 * => The dc stream state currently exists.
9733 BUG_ON(dm_new_crtc_state->stream == NULL);
9735 /* Scaling or underscan settings */
9736 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9737 drm_atomic_crtc_needs_modeset(new_crtc_state))
9738 update_stream_scaling_settings(
9739 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9742 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9745 * Color management settings. We also update color properties
9746 * when a modeset is needed, to ensure it gets reprogrammed.
9748 if (dm_new_crtc_state->base.color_mgmt_changed ||
9749 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9750 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9755 /* Update Freesync settings. */
9756 get_freesync_config_for_crtc(dm_new_crtc_state,
/* Error path: release the candidate stream created above. */
9763 dc_stream_release(new_stream);
/*
 * should_reset_plane - decide whether @plane must be removed and re-added
 * to the DC context for this commit. Any change that can affect z-order,
 * pipe assignment or bandwidth (crtc move, modeset, scaling, rotation,
 * blending, alpha, colorspace, format, tiling/DCC) on this plane or a
 * sibling sharing the CRTC forces a reset.
 */
9767 static bool should_reset_plane(struct drm_atomic_state *state,
9768 struct drm_plane *plane,
9769 struct drm_plane_state *old_plane_state,
9770 struct drm_plane_state *new_plane_state)
9772 struct drm_plane *other;
9773 struct drm_plane_state *old_other_state, *new_other_state;
9774 struct drm_crtc_state *new_crtc_state;
9778 * TODO: Remove this hack once the checks below are sufficient
9779 * enough to determine when we need to reset all the planes on
9782 if (state->allow_modeset)
9785 /* Exit early if we know that we're adding or removing the plane. */
9786 if (old_plane_state->crtc != new_plane_state->crtc)
9789 /* old crtc == new_crtc == NULL, plane not in context. */
9790 if (!new_plane_state->crtc)
9794 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9796 if (!new_crtc_state)
9799 /* CRTC Degamma changes currently require us to recreate planes. */
9800 if (new_crtc_state->color_mgmt_changed)
9803 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9807 * If there are any new primary or overlay planes being added or
9808 * removed then the z-order can potentially change. To ensure
9809 * correct z-order and pipe acquisition the current DC architecture
9810 * requires us to remove and recreate all existing planes.
9812 * TODO: Come up with a more elegant solution for this.
9814 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9815 struct amdgpu_framebuffer *old_afb, *new_afb;
/* Cursor planes don't participate in DC z-ordering. */
9816 if (other->type == DRM_PLANE_TYPE_CURSOR)
9819 if (old_other_state->crtc != new_plane_state->crtc &&
9820 new_other_state->crtc != new_plane_state->crtc)
9823 if (old_other_state->crtc != new_other_state->crtc)
9826 /* Src/dst size and scaling updates. */
9827 if (old_other_state->src_w != new_other_state->src_w ||
9828 old_other_state->src_h != new_other_state->src_h ||
9829 old_other_state->crtc_w != new_other_state->crtc_w ||
9830 old_other_state->crtc_h != new_other_state->crtc_h)
9833 /* Rotation / mirroring updates. */
9834 if (old_other_state->rotation != new_other_state->rotation)
9837 /* Blending updates. */
9838 if (old_other_state->pixel_blend_mode !=
9839 new_other_state->pixel_blend_mode)
9842 /* Alpha updates. */
9843 if (old_other_state->alpha != new_other_state->alpha)
9846 /* Colorspace changes. */
9847 if (old_other_state->color_range != new_other_state->color_range ||
9848 old_other_state->color_encoding != new_other_state->color_encoding)
9851 /* Framebuffer checks fall at the end. */
9852 if (!old_other_state->fb || !new_other_state->fb)
9855 /* Pixel format changes can require bandwidth updates. */
9856 if (old_other_state->fb->format != new_other_state->fb->format)
9859 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9860 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9862 /* Tiling and DCC changes also require bandwidth updates. */
9863 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9864 old_afb->base.modifier != new_afb->base.modifier)
/*
 * dm_check_cursor_fb - validate a framebuffer for the cursor plane: must
 * fit within the CRTC's max cursor size, be uncropped, have width == pitch,
 * and (when no FB modifier is present) use a linear tiling layout.
 */
9871 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9872 struct drm_plane_state *new_plane_state,
9873 struct drm_framebuffer *fb)
9875 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9876 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9880 if (fb->width > new_acrtc->max_cursor_width ||
9881 fb->height > new_acrtc->max_cursor_height) {
9882 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9883 new_plane_state->fb->width,
9884 new_plane_state->fb->height);
/* src is 16.16 fixed point; it must cover the whole FB (no cropping). */
9887 if (new_plane_state->src_w != fb->width << 16 ||
9888 new_plane_state->src_h != fb->height << 16) {
9889 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9893 /* Pitch in pixels */
9894 pitch = fb->pitches[0] / fb->format->cpp[0];
9896 if (fb->width != pitch) {
9897 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9906 /* FB pitch is supported by cursor plane */
9909 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9913 /* Core DRM takes care of checking FB modifiers, so we only need to
9914 * check tiling flags when the FB doesn't have a modifier. */
9915 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
/* Pre-AI (GFX8 and older) uses ARRAY_MODE/MICRO_TILE_MODE tiling bits;
 * AI+ uses SWIZZLE_MODE. Either way, only a linear layout is allowed. */
9916 if (adev->family < AMDGPU_FAMILY_AI) {
9917 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9918 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9919 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9921 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9924 DRM_DEBUG_ATOMIC("Cursor FB not linear");
/*
 * dm_update_plane_state - atomic-check helper that removes the plane's DC
 * plane state (enable == false) or creates and attaches a new one
 * (enable == true). Cursor planes are only validated, never added to the
 * DC context. Sets *lock_and_validation_needed when the context changed.
 *
 * NOTE(review): this excerpt is missing lines (returns, error checks,
 * braces); comments describe only the visible code.
 */
9932 static int dm_update_plane_state(struct dc *dc,
9933 struct drm_atomic_state *state,
9934 struct drm_plane *plane,
9935 struct drm_plane_state *old_plane_state,
9936 struct drm_plane_state *new_plane_state,
9938 bool *lock_and_validation_needed)
9941 struct dm_atomic_state *dm_state = NULL;
9942 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9943 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9944 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9945 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9946 struct amdgpu_crtc *new_acrtc;
9951 new_plane_crtc = new_plane_state->crtc;
9952 old_plane_crtc = old_plane_state->crtc;
9953 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9954 dm_old_plane_state = to_dm_plane_state(old_plane_state);
/* Cursor: validate position/FB only; it never enters the DC context. */
9956 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9957 if (!enable || !new_plane_crtc ||
9958 drm_atomic_plane_disabling(plane->state, new_plane_state))
9961 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9963 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9964 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9968 if (new_plane_state->fb) {
9969 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9970 new_plane_state->fb);
9978 needs_reset = should_reset_plane(state, plane, old_plane_state,
9981 /* Remove any changed/removed planes */
9986 if (!old_plane_crtc)
9989 old_crtc_state = drm_atomic_get_old_crtc_state(
9990 state, old_plane_crtc);
9991 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9993 if (!dm_old_crtc_state->stream)
9996 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9997 plane->base.id, old_plane_crtc->base.id);
9999 ret = dm_atomic_get_state(state, &dm_state);
10003 if (!dc_remove_plane_from_context(
10005 dm_old_crtc_state->stream,
10006 dm_old_plane_state->dc_state,
10007 dm_state->context)) {
/* Drop the old DC plane state; the DRM plane no longer maps to one. */
10013 dc_plane_state_release(dm_old_plane_state->dc_state);
10014 dm_new_plane_state->dc_state = NULL;
10016 *lock_and_validation_needed = true;
10018 } else { /* Add new planes */
10019 struct dc_plane_state *dc_new_plane_state;
10021 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10024 if (!new_plane_crtc)
10027 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10028 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10030 if (!dm_new_crtc_state->stream)
10036 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10040 WARN_ON(dm_new_plane_state->dc_state);
10042 dc_new_plane_state = dc_create_plane_state(dc);
10043 if (!dc_new_plane_state)
10046 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10047 plane->base.id, new_plane_crtc->base.id);
10049 ret = fill_dc_plane_attributes(
10050 drm_to_adev(new_plane_crtc->dev),
10051 dc_new_plane_state,
/* Error paths before attach must release the freshly created state. */
10055 dc_plane_state_release(dc_new_plane_state);
10059 ret = dm_atomic_get_state(state, &dm_state);
10061 dc_plane_state_release(dc_new_plane_state);
10066 * Any atomic check errors that occur after this will
10067 * not need a release. The plane state will be attached
10068 * to the stream, and therefore part of the atomic
10069 * state. It'll be released when the atomic state is
10072 if (!dc_add_plane_to_context(
10074 dm_new_crtc_state->stream,
10075 dc_new_plane_state,
10076 dm_state->context)) {
10078 dc_plane_state_release(dc_new_plane_state);
10082 dm_new_plane_state->dc_state = dc_new_plane_state;
10084 /* Tell DC to do a full surface update every time there
10085 * is a plane change. Inefficient, but works for now.
10087 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10089 *lock_and_validation_needed = true;
/*
 * dm_check_crtc_cursor - reject atomic states where the cursor plane's
 * effective scaling (dst/src, in 0.1% units) differs from the primary
 * plane's, since the HW cursor inherits the underlying pipe's scaling.
 */
10096 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10097 struct drm_crtc *crtc,
10098 struct drm_crtc_state *new_crtc_state)
10100 struct drm_plane_state *new_cursor_state, *new_primary_state;
10101 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10103 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10104 * cursor per pipe but it's going to inherit the scaling and
10105 * positioning from the underlying pipe. Check the cursor plane's
10106 * blending properties match the primary plane's. */
10108 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10109 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10110 if (!new_cursor_state || !new_primary_state ||
10111 !new_cursor_state->fb || !new_primary_state->fb) {
/* src_* are 16.16 fixed point; >>16 yields pixels. Scale is per-mille. */
10115 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10116 (new_cursor_state->src_w >> 16);
10117 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10118 (new_cursor_state->src_h >> 16);
10120 primary_scale_w = new_primary_state->crtc_w * 1000 /
10121 (new_primary_state->src_w >> 16);
10122 primary_scale_h = new_primary_state->crtc_h * 1000 /
10123 (new_primary_state->src_h >> 16);
10125 if (cursor_scale_w != primary_scale_w ||
10126 cursor_scale_h != primary_scale_h) {
10127 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10134 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * add_affected_mst_dsc_crtcs - for the MST connector driven by @crtc, pull
 * every CRTC sharing the same MST topology (and thus DSC bandwidth) into
 * the atomic state so their configuration can be revalidated.
 */
10135 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10137 struct drm_connector *connector;
10138 struct drm_connector_state *conn_state;
10139 struct amdgpu_dm_connector *aconnector = NULL;
/* Find the (first) connector in this state that is assigned to @crtc. */
10141 for_each_new_connector_in_state(state, connector, conn_state, i) {
10142 if (conn_state->crtc != crtc)
10145 aconnector = to_amdgpu_dm_connector(connector);
/* Only MST connectors (with a port and a primary MST device) qualify. */
10146 if (!aconnector->port || !aconnector->mst_port)
10155 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
/*
 * validate_overlay - when an enabled overlay plane is being changed, require
 * that it fully covers the primary plane of the same CRTC (a DC blending
 * constraint when the HW cursor is in use). Returns 0 if acceptable.
 */
10159 static int validate_overlay(struct drm_atomic_state *state)
10162 struct drm_plane *plane;
10163 struct drm_plane_state *new_plane_state;
10164 struct drm_plane_state *primary_state, *overlay_state = NULL;
10166 /* Check if primary plane is contained inside overlay */
10167 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10168 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10169 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10172 overlay_state = new_plane_state;
10177 /* check if we're making changes to the overlay plane */
10178 if (!overlay_state)
10181 /* check if overlay plane is enabled */
10182 if (!overlay_state->crtc)
10185 /* find the primary plane for the CRTC that the overlay is enabled on */
10186 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10187 if (IS_ERR(primary_state))
10188 return PTR_ERR(primary_state);
10190 /* check if primary plane is enabled */
10191 if (!primary_state->crtc)
10194 /* Perform the bounds check to ensure the overlay plane covers the primary */
10195 if (primary_state->crtc_x < overlay_state->crtc_x ||
10196 primary_state->crtc_y < overlay_state->crtc_y ||
10197 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10198 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10199 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10207 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10208 * @dev: The DRM device
10209 * @state: The atomic state to commit
10211 * Validate that the given atomic state is programmable by DC into hardware.
10212 * This involves constructing a &struct dc_state reflecting the new hardware
10213 * state we wish to commit, then querying DC to see if it is programmable. It's
10214 * important not to modify the existing DC state. Otherwise, atomic_check
10215 * may unexpectedly commit hardware changes.
10217 * When validating the DC state, it's important that the right locks are
10218 * acquired. For full updates case which removes/adds/updates streams on one
10219 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10220 * that any such full update commit will wait for completion of any outstanding
10221 * flip using DRMs synchronization events.
10223 * Note that DM adds the affected connectors for all CRTCs in state, when that
10224 * might not seem necessary. This is because DC stream creation requires the
10225 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10226 * be possible but non-trivial - a possible TODO item.
10228 * Return: -Error code if validation failed.
/*
 * Top-level DRM atomic_check hook for amdgpu DM.
 *
 * Expands @state with every CRTC/plane/connector affected by the request,
 * validates the per-object updates through the DM helpers, and — only when a
 * bandwidth-affecting ("full") change was detected — takes the global lock,
 * builds a new DC context and runs DC global validation on it.
 *
 * Returns 0 on success, negative errno on failure (-EDEADLK / -EINTR /
 * -EAGAIN / -ERESTARTSYS are expected restart cases, logged at debug level).
 */
10230 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10231 struct drm_atomic_state *state)
10233 struct amdgpu_device *adev = drm_to_adev(dev);
10234 struct dm_atomic_state *dm_state = NULL;
10235 struct dc *dc = adev->dm.dc;
10236 struct drm_connector *connector;
10237 struct drm_connector_state *old_con_state, *new_con_state;
10238 struct drm_crtc *crtc;
10239 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10240 struct drm_plane *plane;
10241 struct drm_plane_state *old_plane_state, *new_plane_state;
10242 enum dc_status status;
/* Set to true by any change that needs DC global validation below. */
10244 bool lock_and_validation_needed = false;
10245 struct dm_crtc_state *dm_old_crtc_state;
10247 trace_amdgpu_dm_atomic_check_begin(state);
/* Let the DRM core compute mode_changed/connectors_changed first. */
10249 ret = drm_atomic_helper_check_modeset(dev, state);
10253 /* Check connector changes */
10254 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10255 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10256 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10258 /* Skip connectors that are disabled or part of modeset already. */
10259 if (!old_con_state->crtc && !new_con_state->crtc)
10262 if (!new_con_state->crtc)
/* Pull the bound CRTC into the state so we can flag it below. */
10265 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10266 if (IS_ERR(new_crtc_state)) {
10267 ret = PTR_ERR(new_crtc_state);
/* An ABM (backlight power-saving) level change must retrigger
 * connector validation on the CRTC. */
10271 if (dm_old_con_state->abm_level !=
10272 dm_new_con_state->abm_level)
10273 new_crtc_state->connectors_changed = true;
10276 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* A modeset on one MST/DSC CRTC can steal DSC bandwidth from its
 * siblings, so drag all affected MST DSC CRTCs into the state. */
10277 if (dc_resource_is_dsc_encoding_supported(dc)) {
10278 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10279 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10280 ret = add_affected_mst_dsc_crtcs(state, crtc);
10287 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10288 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
/* Nothing relevant changed on this CRTC: no modeset, no color
 * mgmt, no VRR toggle, no forced DSC recompute. */
10290 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10291 !new_crtc_state->color_mgmt_changed &&
10292 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10293 dm_old_crtc_state->dsc_force_changed == false)
10296 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10300 if (!new_crtc_state->enable)
10303 ret = drm_atomic_add_affected_connectors(state, crtc);
10307 ret = drm_atomic_add_affected_planes(state, crtc);
/* debugfs-forced DSC change must be treated as a full modeset. */
10311 if (dm_old_crtc_state->dsc_force_changed)
10312 new_crtc_state->mode_changed = true;
10316 * Add all primary and overlay planes on the CRTC to the state
10317 * whenever a plane is enabled to maintain correct z-ordering
10318 * and to enable fast surface updates.
10320 drm_for_each_crtc(crtc, dev) {
10321 bool modified = false;
10323 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10324 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10327 if (new_plane_state->crtc == crtc ||
10328 old_plane_state->crtc == crtc) {
/* Pull every non-cursor plane currently on this CRTC into the
 * state alongside the one that actually changed. */
10337 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10338 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10342 drm_atomic_get_plane_state(state, plane);
10344 if (IS_ERR(new_plane_state)) {
10345 ret = PTR_ERR(new_plane_state);
10351 /* Remove exiting planes if they are modified */
10352 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10353 ret = dm_update_plane_state(dc, state, plane,
10357 &lock_and_validation_needed);
10362 /* Disable all crtcs which require disable */
10363 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10364 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10368 &lock_and_validation_needed);
10373 /* Enable all crtcs which require enable */
10374 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10375 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10379 &lock_and_validation_needed);
10384 ret = validate_overlay(state);
10388 /* Add new/modified planes */
10389 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10390 ret = dm_update_plane_state(dc, state, plane,
10394 &lock_and_validation_needed);
10399 /* Run this here since we want to validate the streams we created */
10400 ret = drm_atomic_helper_check_planes(dev, state);
10404 /* Check cursor planes scaling */
10405 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10406 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10411 if (state->legacy_cursor_update) {
10413 * This is a fast cursor update coming from the plane update
10414 * helper, check if it can be done asynchronously for better
10417 state->async_update =
10418 !drm_atomic_helper_async_check(dev, state);
10421 * Skip the remaining global validation if this is an async
10422 * update. Cursor updates can be done without affecting
10423 * state or bandwidth calcs and this avoids the performance
10424 * penalty of locking the private state object and
10425 * allocating a new dc_state.
10427 if (state->async_update)
10431 /* Check scaling and underscan changes*/
10432 /* TODO Removed scaling changes validation due to inability to commit
10433 * new stream into context w\o causing full reset. Need to
10434 * decide how to handle.
10436 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10437 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10438 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10439 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10441 /* Skip any modesets/resets */
10442 if (!acrtc || drm_atomic_crtc_needs_modeset(
10443 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10446 /* Skip any thing not scale or underscan changes */
10447 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
/* Scaling/underscan changed: force the full-validation path. */
10450 lock_and_validation_needed = true;
10454 * Streams and planes are reset when there are changes that affect
10455 * bandwidth. Anything that affects bandwidth needs to go through
10456 * DC global validation to ensure that the configuration can be applied
10459 * We have to currently stall out here in atomic_check for outstanding
10460 * commits to finish in this case because our IRQ handlers reference
10461 * DRM state directly - we can end up disabling interrupts too early
10464 * TODO: Remove this stall and drop DM state private objects.
10466 if (lock_and_validation_needed) {
10467 ret = dm_atomic_get_state(state, &dm_state);
10471 ret = do_aquire_global_lock(dev, state);
10475 #if defined(CONFIG_DRM_AMD_DC_DCN)
10476 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10479 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10485 * Perform validation of MST topology in the state:
10486 * We need to perform MST atomic check before calling
10487 * dc_validate_global_state(), or there is a chance
10488 * to get stuck in an infinite loop and hang eventually.
10490 ret = drm_dp_mst_atomic_check(state);
/* 'false' = do not run fast validation; this is the full check. */
10493 status = dc_validate_global_state(dc, dm_state->context, false);
10494 if (status != DC_OK) {
10495 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10496 dc_status_to_str(status), status);
10502 * The commit is a fast update. Fast updates shouldn't change
10503 * the DC context, affect global validation, and can have their
10504 * commit work done in parallel with other commits not touching
10505 * the same resource. If we have a new DC context as part of
10506 * the DM atomic state from validation we need to free it and
10507 * retain the existing one instead.
10509 * Furthermore, since the DM atomic state only contains the DC
10510 * context and can safely be annulled, we can free the state
10511 * and clear the associated private object now to free
10512 * some memory and avoid a possible use-after-free later.
10515 for (i = 0; i < state->num_private_objs; i++) {
10516 struct drm_private_obj *obj = state->private_objs[i].ptr;
10518 if (obj->funcs == adev->dm.atomic_obj.funcs) {
/* j indexes the last entry; swap-and-truncate removal below. */
10519 int j = state->num_private_objs-1;
10521 dm_atomic_destroy_state(obj,
10522 state->private_objs[i].state);
10524 /* If i is not at the end of the array then the
10525 * last element needs to be moved to where i was
10526 * before the array can safely be truncated.
10529 state->private_objs[i] =
10530 state->private_objs[j];
10532 state->private_objs[j].ptr = NULL;
10533 state->private_objs[j].state = NULL;
10534 state->private_objs[j].old_state = NULL;
10535 state->private_objs[j].new_state = NULL;
10537 state->num_private_objs = j;
10543 /* Store the overall update type for use later in atomic check. */
10544 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10545 struct dm_crtc_state *dm_new_crtc_state =
10546 to_dm_crtc_state(new_crtc_state);
10548 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10553 /* Must be success */
10556 trace_amdgpu_dm_atomic_check_finish(state, ret);
/* Error path: restartable errors are expected, log them quietly. */
10561 if (ret == -EDEADLK)
10562 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10563 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10564 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10566 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10568 trace_amdgpu_dm_atomic_check_finish(state, ret);
/*
 * Check whether a DP sink can operate without the MSA timing parameters,
 * i.e. whether it tolerates timing changes without a full modeset —
 * a prerequisite for DP adaptive sync / FreeSync.
 *
 * Reads DPCD register DP_DOWN_STREAM_PORT_COUNT and tests the
 * DP_MSA_TIMING_PAR_IGNORED bit. Returns false if there is no dc_link
 * or the DPCD read fails.
 */
10573 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10574 struct amdgpu_dm_connector *amdgpu_dm_connector)
10577 bool capable = false;
10579 if (amdgpu_dm_connector->dc_link &&
10580 dm_helpers_dp_read_dpcd(
10582 amdgpu_dm_connector->dc_link,
10583 DP_DOWN_STREAM_PORT_COUNT,
10585 sizeof(dpcd_data))) {
10586 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
/*
 * Send one chunk (max DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA EDID
 * extension block to the DMUB firmware for parsing.
 *
 * @offset/@total_length describe the chunk's position within the whole
 * extension block; @data/@length describe the chunk itself. When the
 * firmware reports an AMD VSDB, the FreeSync capabilities are copied
 * into @vsdb. Returns false on oversize chunk, command failure, NAK,
 * or an unrecognized reply type.
 */
10592 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10593 unsigned int offset,
10594 unsigned int total_length,
10596 unsigned int length,
10597 struct amdgpu_hdmi_vsdb_info *vsdb)
10600 union dmub_rb_cmd cmd;
10601 struct dmub_cmd_send_edid_cea *input;
10602 struct dmub_cmd_edid_cea_output *output;
/* Firmware payload buffer is fixed-size; refuse oversized chunks. */
10604 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10607 memset(&cmd, 0, sizeof(cmd));
10609 input = &cmd.edid_cea.data.input;
/* Build the DMUB__EDID_CEA ring-buffer command. */
10611 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10612 cmd.edid_cea.header.sub_type = 0;
10613 cmd.edid_cea.header.payload_bytes =
10614 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10615 input->offset = offset;
10616 input->length = length;
10617 input->total_length = total_length;
10618 memcpy(input->payload, data, length);
/* Synchronous round-trip: reply data lands back in &cmd. */
10620 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10622 DRM_ERROR("EDID CEA parser failed\n");
10626 output = &cmd.edid_cea.data.output;
10628 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
/* Plain ack for an intermediate chunk; success flag must be set. */
10629 if (!output->ack.success) {
10630 DRM_ERROR("EDID CEA ack failed at offset %d\n",
10631 output->ack.offset);
10633 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10634 if (!output->amd_vsdb.vsdb_found)
/* AMD VSDB found: export the FreeSync range to the caller. */
10637 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10638 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10639 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10640 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10642 DRM_WARN("Unknown EDID CEA parser results\n");
/*
 * Parse a CEA EDID extension block via the DMCU firmware path.
 *
 * Streams @edid_ext to the firmware 8 bytes at a time with
 * dc_edid_parser_send_cea(), then polls for either an AMD VSDB result
 * (FreeSync min/max refresh range) or a plain ack. On a VSDB hit the
 * values are stored into @vsdb_info.
 */
10649 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10650 uint8_t *edid_ext, int len,
10651 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10655 /* send extension block to DMCU for parsing */
10656 for (i = 0; i < len; i += 8) {
10660 /* send 8 bytes a time */
10661 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10665 /* EDID block sent completed, expect result */
10666 int version, min_rate, max_rate;
10668 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10670 /* amd vsdb found */
10671 vsdb_info->freesync_supported = 1;
10672 vsdb_info->amd_vsdb_version = version;
10673 vsdb_info->min_refresh_rate_hz = min_rate;
10674 vsdb_info->max_refresh_rate_hz = max_rate;
/* No VSDB reply; fall back to checking for a chunk ack. */
10682 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
/*
 * Parse a CEA EDID extension block via the DMUB firmware path.
 *
 * Streams @edid_ext in 8-byte chunks through dm_edid_parser_send_cea(),
 * which fills @vsdb_info when the firmware finds an AMD VSDB.
 * Returns whether FreeSync support was reported.
 */
10690 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10691 uint8_t *edid_ext, int len,
10692 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10696 /* send extension block to DMCU for parsing */
10697 for (i = 0; i < len; i += 8) {
10698 /* send 8 bytes a time */
10699 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10703 return vsdb_info->freesync_supported;
/*
 * Dispatch CEA EDID extension parsing to whichever firmware parser the
 * ASIC provides: DMUB when a dmub_srv instance exists, DMCU otherwise.
 */
10706 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10707 uint8_t *edid_ext, int len,
10708 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10710 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10712 if (adev->dm.dmub_srv)
10713 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10715 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
/*
 * Locate the CEA extension inside @edid and parse it for an AMD VSDB
 * (HDMI FreeSync capability block), filling @vsdb_info on success.
 *
 * Return: index of the CEA extension block (>= 0) when a valid VSDB was
 * found, -ENODEV otherwise. The open-coded search mirrors
 * drm_find_cea_extension()/cea_db_offsets(), as the comments note.
 */
10718 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10719 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10721 uint8_t *edid_ext = NULL;
10723 bool valid_vsdb_found = false;
10725 /*----- drm_find_cea_extension() -----*/
10726 /* No EDID or EDID extensions */
10727 if (edid == NULL || edid->extensions == 0)
10730 /* Find CEA extension */
10731 for (i = 0; i < edid->extensions; i++) {
/* Extension blocks follow the base block, EDID_LENGTH bytes each. */
10732 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10733 if (edid_ext[0] == CEA_EXT)
10737 if (i == edid->extensions)
10740 /*----- cea_db_offsets() -----*/
10741 if (edid_ext[0] != CEA_EXT)
10744 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10746 return valid_vsdb_found ? i : -ENODEV;
/*
 * Refresh the connector's FreeSync/VRR capability from a (possibly NULL)
 * EDID.
 *
 * For DP/eDP sinks: if the sink ignores MSA timing parameters, scan the
 * EDID detailed timings for a monitor range descriptor and take min/max
 * vfreq from it. For HDMI sinks: look for an AMD VSDB in the CEA
 * extension via parse_hdmi_amd_vsdb(). In both cases the range must span
 * more than 10 Hz to count as FreeSync capable. The result is stored in
 * the dm connector state and mirrored to the DRM vrr_capable property.
 */
10749 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10753 struct detailed_timing *timing;
10754 struct detailed_non_pixel *data;
10755 struct detailed_data_monitor_range *range;
10756 struct amdgpu_dm_connector *amdgpu_dm_connector =
10757 to_amdgpu_dm_connector(connector);
10758 struct dm_connector_state *dm_con_state = NULL;
10760 struct drm_device *dev = connector->dev;
10761 struct amdgpu_device *adev = drm_to_adev(dev);
10762 bool freesync_capable = false;
10763 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10765 if (!connector->state) {
10766 DRM_ERROR("%s - Connector has no state", __func__);
10771 dm_con_state = to_dm_connector_state(connector->state);
/* Reset cached caps before re-deriving them from the EDID. */
10773 amdgpu_dm_connector->min_vfreq = 0;
10774 amdgpu_dm_connector->max_vfreq = 0;
10775 amdgpu_dm_connector->pixel_clock_mhz = 0;
/* NOTE(review): dm_con_state is fetched again here (also at 10771);
 * presumably the two fetches belong to different branches elided from
 * this view — confirm against the full source. */
10780 dm_con_state = to_dm_connector_state(connector->state);
10782 if (!amdgpu_dm_connector->dc_sink) {
10783 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10786 if (!adev->dm.freesync_module)
10790 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10791 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10792 bool edid_check_required = false;
/* Only scan the EDID if the sink ignores MSA timing params. */
10795 edid_check_required = is_dp_capable_without_timing_msa(
10797 amdgpu_dm_connector);
/* Monitor range descriptors require EDID 1.2 or newer. */
10800 if (edid_check_required == true && (edid->version > 1 ||
10801 (edid->version == 1 && edid->revision > 1))) {
10802 for (i = 0; i < 4; i++) {
10804 timing = &edid->detailed_timings[i];
10805 data = &timing->data.other_data;
10806 range = &data->data.range;
10808 * Check if monitor has continuous frequency mode
10810 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10813 * Check for flag range limits only. If flag == 1 then
10814 * no additional timing information provided.
10815 * Default GTF, GTF Secondary curve and CVT are not
10818 if (range->flags != 1)
10821 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10822 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
/* EDID stores pixel clock in 10 kHz units... field name kept. */
10823 amdgpu_dm_connector->pixel_clock_mhz =
10824 range->pixel_clock_mhz * 10;
10826 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10827 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
/* Require a usable VRR window (> 10 Hz spread). */
10832 if (amdgpu_dm_connector->max_vfreq -
10833 amdgpu_dm_connector->min_vfreq > 10) {
10835 freesync_capable = true;
10838 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
/* HDMI: FreeSync range comes from the AMD VSDB, if present. */
10839 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10840 if (i >= 0 && vsdb_info.freesync_supported) {
10841 timing = &edid->detailed_timings[i];
10842 data = &timing->data.other_data;
10844 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10845 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10846 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10847 freesync_capable = true;
10849 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10850 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10856 dm_con_state->freesync_capable = freesync_capable;
/* Expose the result to userspace via the DRM vrr_capable property. */
10858 if (connector->vrr_capable_property)
10859 drm_connector_set_vrr_capable_property(connector,
/*
 * Propagate the debugfs force_timing_sync flag to every active DC stream
 * and retrigger CRTC synchronization.
 *
 * Holds dm.dc_lock while walking dc->current_state so the stream list
 * cannot change underneath us.
 */
10863 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10865 struct amdgpu_device *adev = drm_to_adev(dev);
10866 struct dc *dc = adev->dm.dc;
10869 mutex_lock(&adev->dm.dc_lock);
10870 if (dc->current_state) {
/* Apply the forced setting to each stream's CRTC-reset trigger. */
10871 for (i = 0; i < dc->current_state->stream_count; ++i)
10872 dc->current_state->streams[i]
10873 ->triggered_crtc_reset.enabled =
10874 adev->dm.force_timing_sync;
10876 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10877 dc_trigger_sync(dc, dc->current_state);
10879 mutex_unlock(&adev->dm.dc_lock);
/*
 * DC register-write callback: writes @value to MMIO @address through the
 * CGS layer and records the access in the amdgpu_dc_wreg tracepoint.
 * With DM_CHECK_ADDR_0 defined, a write to address 0 is rejected as a
 * programming error.
 */
10882 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10883 uint32_t value, const char *func_name)
10885 #ifdef DM_CHECK_ADDR_0
10886 if (address == 0) {
10887 DC_ERR("invalid register write. address = 0");
10891 cgs_write_register(ctx->cgs_device, address, value);
10892 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
/*
 * DC register-read callback: reads MMIO @address through the CGS layer
 * and records the access in the amdgpu_dc_rreg tracepoint.
 *
 * While a DMUB register-offload gather is in progress (and burst writes
 * are not requested), reads are short-circuited — the value returned in
 * that branch is established outside this view.
 */
10895 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10896 const char *func_name)
10899 #ifdef DM_CHECK_ADDR_0
10900 if (address == 0) {
10901 DC_ERR("invalid register read; address = 0\n");
10906 if (ctx->dmub_srv &&
10907 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10908 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10913 value = cgs_read_register(ctx->cgs_device, address);
10915 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
/*
 * Perform a DP AUX transaction through the DMUB firmware and wait
 * synchronously for the result.
 *
 * Kicks off the async transfer, then blocks (interruptible, 10 s timeout)
 * on dmub_aux_transfer_done, which the DMUB notification handler
 * completes. On success the AUX reply command is stored through
 * *payload->reply and, for an ACKed read, the reply bytes are copied
 * into payload->data. *operation_result carries the AUX status
 * (AUX_RET_ERROR_TIMEOUT on timeout). Returns the reply length.
 */
10920 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10921 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10923 struct amdgpu_device *adev = ctx->driver_context;
10926 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
/* Wait up to 10 s for the DMUB outbox notification to complete us. */
10927 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10929 *operation_result = AUX_RET_ERROR_TIMEOUT;
10932 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10934 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10935 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10937 // For read case, Copy data to payload
10938 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10939 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10940 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10941 adev->dm.dmub_notify->aux_reply.length);
10944 return adev->dm.dmub_notify->aux_reply.length;