2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
52 #include "amdgpu_pm.h"
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
61 #include "amdgpu_dm_psr.h"
63 #include "ivsrcid/ivsrcid_vislands30.h"
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
92 #include "soc15_common.h"
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
131 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133 * requests into DC requests, and DC responses into DRM responses.
135 * The root control structure is &struct amdgpu_display_manager.
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
141 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
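/* Map the dongle type from the link's DPCD caps to a DRM subconnector value. */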
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 switch (link->dpcd_caps.dongle_type) {
146 case DISPLAY_DONGLE_NONE:
147 return DRM_MODE_SUBCONNECTOR_Native;
148 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 return DRM_MODE_SUBCONNECTOR_VGA;
150 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 return DRM_MODE_SUBCONNECTOR_DVID;
153 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 return DRM_MODE_SUBCONNECTOR_HDMIA;
156 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 return DRM_MODE_SUBCONNECTOR_Unknown;
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 struct dc_link *link = aconnector->dc_link;
165 struct drm_connector *connector = &aconnector->base;
166 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171 if (aconnector->dc_sink)
172 subconnector = get_subconnector_type(link);
174 drm_object_property_set_value(&connector->base,
175 connector->dev->mode_config.dp_subconnector_property,
180 * initializes drm_device display related structures, based on the information
181 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
182 * drm_encoder, drm_mode_config
184 * Returns 0 on success
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* removes and deallocates the drm structures, created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 struct drm_plane *plane,
192 unsigned long possible_crtcs,
193 const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 struct drm_plane *plane,
196 uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 struct amdgpu_dm_connector *amdgpu_dm_connector,
200 struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 struct amdgpu_encoder *aencoder,
203 uint32_t link_index);
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 struct drm_atomic_state *state);
212 static void handle_cursor_update(struct drm_plane *plane,
213 struct drm_plane_state *old_plane_state);
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 struct drm_crtc_state *new_crtc_state);
222 * dm_vblank_get_counter
225 * Get counter for number of vertical blanks
228 * struct amdgpu_device *adev - [in] desired amdgpu device
229 * int crtc - [in] which CRTC to get the counter from
232 * Counter for vertical blanks
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
236 if (crtc >= adev->mode_info.num_crtc)
239 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
241 if (acrtc->dm_irq_params.stream == NULL) {
242 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
247 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 u32 *vbl, u32 *position)
254 uint32_t v_blank_start, v_blank_end, h_position, v_position;
256 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
259 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
261 if (acrtc->dm_irq_params.stream == NULL) {
262 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268 * TODO rework base driver to use values directly.
269 * for now parse it back into reg-format
271 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277 *position = v_position | (h_position << 16);
278 *vbl = v_blank_start | (v_blank_end << 16);
284 static bool dm_is_idle(void *handle)
290 static int dm_wait_for_idle(void *handle)
296 static bool dm_check_soft_reset(void *handle)
301 static int dm_soft_reset(void *handle)
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
311 struct drm_device *dev = adev_to_drm(adev);
312 struct drm_crtc *crtc;
313 struct amdgpu_crtc *amdgpu_crtc;
315 if (WARN_ON(otg_inst == -1))
316 return adev->mode_info.crtcs[0];
318 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 amdgpu_crtc = to_amdgpu_crtc(crtc);
321 if (amdgpu_crtc->otg_inst == otg_inst)
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
330 return acrtc->dm_irq_params.freesync_config.state ==
331 VRR_STATE_ACTIVE_VARIABLE ||
332 acrtc->dm_irq_params.freesync_config.state ==
333 VRR_STATE_ACTIVE_FIXED;
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
338 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 struct dm_crtc_state *new_state)
345 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
347 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
354 * dm_pflip_high_irq() - Handle pageflip interrupt
355 * @interrupt_params: ignored
357 * Handles the pageflip interrupt by notifying all interested parties
358 * that the pageflip has been completed.
360 static void dm_pflip_high_irq(void *interrupt_params)
362 struct amdgpu_crtc *amdgpu_crtc;
363 struct common_irq_params *irq_params = interrupt_params;
364 struct amdgpu_device *adev = irq_params->adev;
366 struct drm_pending_vblank_event *e;
367 uint32_t vpos, hpos, v_blank_start, v_blank_end;
370 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
372 /* IRQ could occur when in initial stage */
373 /* TODO work and BO cleanup */
374 if (amdgpu_crtc == NULL) {
375 DC_LOG_PFLIP("CRTC is null, returning.\n");
379 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
381 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
383 amdgpu_crtc->pflip_status,
384 AMDGPU_FLIP_SUBMITTED,
385 amdgpu_crtc->crtc_id,
387 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391 /* page flip completed. */
392 e = amdgpu_crtc->event;
393 amdgpu_crtc->event = NULL;
397 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
399 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
401 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 &v_blank_end, &hpos, &vpos) ||
403 (vpos < v_blank_start)) {
404 /* Update to correct count and vblank timestamp if racing with
405 * vblank irq. This also updates to the correct vblank timestamp
406 * even in VRR mode, as scanout is past the front-porch atm.
408 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
410 /* Wake up userspace by sending the pageflip event with proper
411 * count and timestamp of vblank of flip completion.
414 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
416 /* Event sent, so done with vblank for this flip */
417 drm_crtc_vblank_put(&amdgpu_crtc->base);
420 /* VRR active and inside front-porch: vblank count and
421 * timestamp for pageflip event will only be up to date after
422 * drm_crtc_handle_vblank() has been executed from late vblank
423 * irq handler after start of back-porch (vline 0). We queue the
424 * pageflip event for send-out by drm_crtc_handle_vblank() with
425 * updated timestamp and count, once it runs after us.
427 * We need to open-code this instead of using the helper
428 * drm_crtc_arm_vblank_event(), as that helper would
429 * call drm_crtc_accurate_vblank_count(), which we must
430 * not call in VRR mode while we are in front-porch!
433 /* sequence will be replaced by real count during send-out. */
434 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 e->pipe = amdgpu_crtc->crtc_id;
437 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
441 /* Keep track of vblank of this flip for flip throttling. We use the
442 * cooked hw counter, as that one is incremented at start of this vblank
443 * of pageflip completion, so last_flip_vblank is the forbidden count
444 * for queueing new pageflips if vsync + VRR is enabled.
446 amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
449 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
452 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 amdgpu_crtc->crtc_id, amdgpu_crtc,
454 vrr_active, (int) !e);
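/*
 * dm_vupdate_high_irq() - Handles the VUPDATE interrupt
 * @interrupt_params: the common IRQ parameters carrying the amdgpu device
 *
 * Tracks the measured refresh rate and, when VRR is active, performs the
 * core vblank handling that must run after the end of the front porch,
 * plus BTR processing for pre-DCE12 ASICs.
 */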
457 static void dm_vupdate_high_irq(void *interrupt_params)
459 struct common_irq_params *irq_params = interrupt_params;
460 struct amdgpu_device *adev = irq_params->adev;
461 struct amdgpu_crtc *acrtc;
462 struct drm_device *drm_dev;
463 struct drm_vblank_crtc *vblank;
464 ktime_t frame_duration_ns, previous_timestamp;
468 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
471 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
472 drm_dev = acrtc->base.dev;
473 vblank = &drm_dev->vblank[acrtc->base.index];
474 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
475 frame_duration_ns = vblank->time - previous_timestamp;
477 if (frame_duration_ns > 0) {
478 trace_amdgpu_refresh_rate_track(acrtc->base.index,
480 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
481 atomic64_set(&irq_params->previous_timestamp, vblank->time);
484 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
488 /* Core vblank handling is done here after end of front-porch in
489 * vrr mode, as vblank timestamping only gives valid results once
490 * scanout is past the front-porch. This will also deliver
491 * page-flip completion events that have been queued to us
492 * if a pageflip happened inside front-porch.
495 drm_crtc_handle_vblank(&acrtc->base);
497 /* BTR processing for pre-DCE12 ASICs */
498 if (acrtc->dm_irq_params.stream &&
499 adev->family < AMDGPU_FAMILY_AI) {
500 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
501 mod_freesync_handle_v_update(
502 adev->dm.freesync_module,
503 acrtc->dm_irq_params.stream,
504 &acrtc->dm_irq_params.vrr_params);
506 dc_stream_adjust_vmin_vmax(
508 acrtc->dm_irq_params.stream,
509 &acrtc->dm_irq_params.vrr_params.adjust);
510 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
517 * dm_crtc_high_irq() - Handles CRTC interrupt
518 * @interrupt_params: used for determining the CRTC instance
520 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
523 static void dm_crtc_high_irq(void *interrupt_params)
525 struct common_irq_params *irq_params = interrupt_params;
526 struct amdgpu_device *adev = irq_params->adev;
527 struct amdgpu_crtc *acrtc;
531 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
535 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
537 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
538 vrr_active, acrtc->dm_irq_params.active_planes);
541 * Core vblank handling at start of front-porch is only possible
542 * in non-vrr mode, as only then does vblank timestamping give
543 * valid results while inside the front-porch. Otherwise defer it
544 * to dm_vupdate_high_irq after end of front-porch.
547 drm_crtc_handle_vblank(&acrtc->base);
550 * The following must happen at start of vblank, for crc
551 * computation and below-the-range btr support in vrr mode.
553 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
555 /* BTR updates need to happen before VUPDATE on Vega and above. */
556 if (adev->family < AMDGPU_FAMILY_AI)
559 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
561 if (acrtc->dm_irq_params.stream &&
562 acrtc->dm_irq_params.vrr_params.supported &&
563 acrtc->dm_irq_params.freesync_config.state ==
564 VRR_STATE_ACTIVE_VARIABLE) {
565 mod_freesync_handle_v_update(adev->dm.freesync_module,
566 acrtc->dm_irq_params.stream,
567 &acrtc->dm_irq_params.vrr_params);
569 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
570 &acrtc->dm_irq_params.vrr_params.adjust);
574 * If there aren't any active_planes then DCN HUBP may be clock-gated.
575 * In that case, pageflip completion interrupts won't fire and pageflip
576 * completion events won't get delivered. Prevent this by sending
577 * pending pageflip events from here if a flip is still pending.
579 * If any planes are enabled, use dm_pflip_high_irq() instead, to
580 * avoid race conditions between flip programming and completion,
581 * which could cause too early flip completion events.
583 if (adev->family >= AMDGPU_FAMILY_RV &&
584 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585 acrtc->dm_irq_params.active_planes == 0) {
587 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
589 drm_crtc_vblank_put(&acrtc->base);
591 acrtc->pflip_status = AMDGPU_FLIP_NONE;
594 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
597 #if defined(CONFIG_DRM_AMD_DC_DCN)
598 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
600 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601 * DCN generation ASICs
602 * @interrupt_params: interrupt parameters
604 * Used to set crc window/read out crc value at vertical line 0 position
606 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
608 struct common_irq_params *irq_params = interrupt_params;
609 struct amdgpu_device *adev = irq_params->adev;
610 struct amdgpu_crtc *acrtc;
612 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
617 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
622 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
623 * @interrupt_params: used for determining the Outbox instance
625 * Handles the Outbox Interrupt
628 #define DMUB_TRACE_MAX_READ 64
629 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
631 struct dmub_notification notify;
632 struct common_irq_params *irq_params = interrupt_params;
633 struct amdgpu_device *adev = irq_params->adev;
634 struct amdgpu_display_manager *dm = &adev->dm;
635 struct dmcub_trace_buf_entry entry = { 0 };
638 if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
641 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
642 } while (notify.pending_notification);
644 if (adev->dm.dmub_notify)
645 memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
646 if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
647 complete(&adev->dm.dmub_aux_transfer_done);
648 // TODO : HPD Implementation
651 DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
657 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
658 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
659 entry.param0, entry.param1);
661 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
662 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
668 } while (count <= DMUB_TRACE_MAX_READ);
670 ASSERT(count <= DMUB_TRACE_MAX_READ);
674 static int dm_set_clockgating_state(void *handle,
675 enum amd_clockgating_state state)
680 static int dm_set_powergating_state(void *handle,
681 enum amd_powergating_state state)
686 /* Prototypes of private functions */
687 static int dm_early_init(void* handle);
689 /* Allocate memory for FBC compressed data */
690 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
692 struct drm_device *dev = connector->dev;
693 struct amdgpu_device *adev = drm_to_adev(dev);
694 struct dm_compressor_info *compressor = &adev->dm.compressor;
695 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
696 struct drm_display_mode *mode;
697 unsigned long max_size = 0;
699 if (adev->dm.dc->fbc_compressor == NULL)
702 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
705 if (compressor->bo_ptr)
709 list_for_each_entry(mode, &connector->modes, head) {
710 if (max_size < mode->htotal * mode->vtotal)
711 max_size = mode->htotal * mode->vtotal;
715 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
716 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
717 &compressor->gpu_addr, &compressor->cpu_addr);
720 DRM_ERROR("DM: Failed to initialize FBC\n");
722 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
723 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
730 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
731 int pipe, bool *enabled,
732 unsigned char *buf, int max_bytes)
734 struct drm_device *dev = dev_get_drvdata(kdev);
735 struct amdgpu_device *adev = drm_to_adev(dev);
736 struct drm_connector *connector;
737 struct drm_connector_list_iter conn_iter;
738 struct amdgpu_dm_connector *aconnector;
743 mutex_lock(&adev->dm.audio_lock);
745 drm_connector_list_iter_begin(dev, &conn_iter);
746 drm_for_each_connector_iter(connector, &conn_iter) {
747 aconnector = to_amdgpu_dm_connector(connector);
748 if (aconnector->audio_inst != port)
752 ret = drm_eld_size(connector->eld);
753 memcpy(buf, connector->eld, min(max_bytes, ret));
757 drm_connector_list_iter_end(&conn_iter);
759 mutex_unlock(&adev->dm.audio_lock);
761 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
766 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
767 .get_eld = amdgpu_dm_audio_component_get_eld,
770 static int amdgpu_dm_audio_component_bind(struct device *kdev,
771 struct device *hda_kdev, void *data)
773 struct drm_device *dev = dev_get_drvdata(kdev);
774 struct amdgpu_device *adev = drm_to_adev(dev);
775 struct drm_audio_component *acomp = data;
777 acomp->ops = &amdgpu_dm_audio_component_ops;
779 adev->dm.audio_component = acomp;
784 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
785 struct device *hda_kdev, void *data)
787 struct drm_device *dev = dev_get_drvdata(kdev);
788 struct amdgpu_device *adev = drm_to_adev(dev);
789 struct drm_audio_component *acomp = data;
793 adev->dm.audio_component = NULL;
796 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
797 .bind = amdgpu_dm_audio_component_bind,
798 .unbind = amdgpu_dm_audio_component_unbind,
801 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
808 adev->mode_info.audio.enabled = true;
810 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
812 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
813 adev->mode_info.audio.pin[i].channels = -1;
814 adev->mode_info.audio.pin[i].rate = -1;
815 adev->mode_info.audio.pin[i].bits_per_sample = -1;
816 adev->mode_info.audio.pin[i].status_bits = 0;
817 adev->mode_info.audio.pin[i].category_code = 0;
818 adev->mode_info.audio.pin[i].connected = false;
819 adev->mode_info.audio.pin[i].id =
820 adev->dm.dc->res_pool->audios[i]->inst;
821 adev->mode_info.audio.pin[i].offset = 0;
824 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
828 adev->dm.audio_registered = true;
833 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
838 if (!adev->mode_info.audio.enabled)
841 if (adev->dm.audio_registered) {
842 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
843 adev->dm.audio_registered = false;
846 /* TODO: Disable audio? */
848 adev->mode_info.audio.enabled = false;
851 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
853 struct drm_audio_component *acomp = adev->dm.audio_component;
855 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
856 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
858 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
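/*
 * Bring up the DMUB service: copy the firmware and VBIOS into the reserved
 * framebuffer regions, program the hardware parameters and wait for the
 * firmware auto-load to complete.
 */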
863 static int dm_dmub_hw_init(struct amdgpu_device *adev)
865 const struct dmcub_firmware_header_v1_0 *hdr;
866 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
867 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
868 const struct firmware *dmub_fw = adev->dm.dmub_fw;
869 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
870 struct abm *abm = adev->dm.dc->res_pool->abm;
871 struct dmub_srv_hw_params hw_params;
872 enum dmub_status status;
873 const unsigned char *fw_inst_const, *fw_bss_data;
874 uint32_t i, fw_inst_const_size, fw_bss_data_size;
878 /* DMUB isn't supported on the ASIC. */
882 DRM_ERROR("No framebuffer info for DMUB service.\n");
887 /* Firmware required for DMUB support. */
888 DRM_ERROR("No firmware provided for DMUB.\n");
892 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
893 if (status != DMUB_STATUS_OK) {
894 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
898 if (!has_hw_support) {
899 DRM_INFO("DMUB unsupported on ASIC\n");
903 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
905 fw_inst_const = dmub_fw->data +
906 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
909 fw_bss_data = dmub_fw->data +
910 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
911 le32_to_cpu(hdr->inst_const_bytes);
913 /* Copy firmware and bios info into FB memory. */
914 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
915 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
917 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
919 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
920 * amdgpu_ucode_init_single_fw will load dmub firmware
921 * fw_inst_const part to cw0; otherwise, the firmware back door load
922 * will be done by dm_dmub_hw_init
924 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
925 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
929 if (fw_bss_data_size)
930 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
931 fw_bss_data, fw_bss_data_size);
933 /* Copy firmware bios info into FB memory. */
934 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
937 /* Reset regions that need to be reset. */
938 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
939 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
941 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
942 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
944 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
945 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
947 /* Initialize hardware. */
948 memset(&hw_params, 0, sizeof(hw_params));
949 hw_params.fb_base = adev->gmc.fb_start;
950 hw_params.fb_offset = adev->gmc.aper_base;
952 /* backdoor load firmware and trigger dmub running */
953 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
954 hw_params.load_inst_const = true;
957 hw_params.psp_version = dmcu->psp_version;
959 for (i = 0; i < fb_info->num_fb; ++i)
960 hw_params.fb[i] = &fb_info->fb[i];
962 status = dmub_srv_hw_init(dmub_srv, &hw_params);
963 if (status != DMUB_STATUS_OK) {
964 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
968 /* Wait for firmware load to finish. */
969 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
970 if (status != DMUB_STATUS_OK)
971 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
973 /* Init DMCU and ABM if available. */
975 dmcu->funcs->dmcu_init(dmcu);
976 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
979 if (!adev->dm.dc->ctx->dmub_srv)
980 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
981 if (!adev->dm.dc->ctx->dmub_srv) {
982 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
986 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
987 adev->dm.dmcub_fw_version);
992 #if defined(CONFIG_DRM_AMD_DC_DCN)
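/*
 * Fill a dc_phy_addr_space_config with the system aperture, framebuffer and
 * GART page-table addresses taken from the GMC, so DC can program DCN
 * address translation.
 */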
993 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
996 uint32_t logical_addr_low;
997 uint32_t logical_addr_high;
998 uint32_t agp_base, agp_bot, agp_top;
999 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1001 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1002 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1004 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1006 * Raven2 has a HW issue where it is unable to use the vram which
1007 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1008 * workaround that increases the system aperture high address (add 1)
1009 * to get rid of the VM fault and hardware hang.
1011 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1013 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1016 agp_bot = adev->gmc.agp_start >> 24;
1017 agp_top = adev->gmc.agp_end >> 24;
1020 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1021 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1022 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1023 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1024 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1025 page_table_base.low_part = lower_32_bits(pt_base);
1027 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1028 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1030 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1031 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1032 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1034 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1035 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1036 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1038 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1039 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1040 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1042 pa_config->is_hvm_enabled = 0;
1046 #if defined(CONFIG_DRM_AMD_DC_DCN)
1047 static void event_mall_stutter(struct work_struct *work)
1050 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1051 struct amdgpu_display_manager *dm = vblank_work->dm;
1053 mutex_lock(&dm->dc_lock);
1055 if (vblank_work->enable)
1056 dm->active_vblank_irq_count++;
1057 else if (dm->active_vblank_irq_count)
1058 dm->active_vblank_irq_count--;
1060 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1062 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1064 mutex_unlock(&dm->dc_lock);
1067 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1070 int max_caps = dc->caps.max_links;
1071 struct vblank_workqueue *vblank_work;
1074 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1075 if (ZERO_OR_NULL_PTR(vblank_work)) {
1080 for (i = 0; i < max_caps; i++)
1081 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1086 static int amdgpu_dm_init(struct amdgpu_device *adev)
1088 struct dc_init_data init_data;
1089 #ifdef CONFIG_DRM_AMD_DC_HDCP
1090 struct dc_callback_init init_params;
1094 adev->dm.ddev = adev_to_drm(adev);
1095 adev->dm.adev = adev;
1097 /* Zero all the fields */
1098 memset(&init_data, 0, sizeof(init_data));
1099 #ifdef CONFIG_DRM_AMD_DC_HDCP
1100 memset(&init_params, 0, sizeof(init_params));
1103 mutex_init(&adev->dm.dc_lock);
1104 mutex_init(&adev->dm.audio_lock);
1105 #if defined(CONFIG_DRM_AMD_DC_DCN)
1106 spin_lock_init(&adev->dm.vblank_lock);
1109 if (amdgpu_dm_irq_init(adev)) {
1110 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1114 init_data.asic_id.chip_family = adev->family;
1116 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1117 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1119 init_data.asic_id.vram_width = adev->gmc.vram_width;
1120 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1121 init_data.asic_id.atombios_base_address =
1122 adev->mode_info.atom_context->bios;
1124 init_data.driver = adev;
1126 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1128 if (!adev->dm.cgs_device) {
1129 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1133 init_data.cgs_device = adev->dm.cgs_device;
1135 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1137 switch (adev->asic_type) {
1142 init_data.flags.gpu_vm_support = true;
1143 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1144 init_data.flags.disable_dmcu = true;
1147 case CHIP_YELLOW_CARP:
1148 init_data.flags.gpu_vm_support = true;
1154 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1155 init_data.flags.fbc_support = true;
1157 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1158 init_data.flags.multi_mon_pp_mclk_switch = true;
1160 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1161 init_data.flags.disable_fractional_pwm = true;
1163 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1164 init_data.flags.edp_no_power_sequencing = true;
1166 init_data.flags.power_down_display_on_boot = true;
1168 INIT_LIST_HEAD(&adev->dm.da_list);
1169 /* Display Core create. */
1170 adev->dm.dc = dc_create(&init_data);
1173 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1175 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1179 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1180 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1181 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1184 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1185 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1187 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1188 adev->dm.dc->debug.disable_stutter = true;
1190 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1191 adev->dm.dc->debug.disable_dsc = true;
1193 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1194 adev->dm.dc->debug.disable_clock_gate = true;
1196 r = dm_dmub_hw_init(adev);
1198 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1202 dc_hardware_init(adev->dm.dc);
1204 #if defined(CONFIG_DRM_AMD_DC_DCN)
1205 if (adev->apu_flags) {
1206 struct dc_phy_addr_space_config pa_config;
1208 mmhub_read_system_context(adev, &pa_config);
1210 // Call the DC init_memory func
1211 dc_setup_system_context(adev->dm.dc, &pa_config);
1215 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1216 if (!adev->dm.freesync_module) {
1218 "amdgpu: failed to initialize freesync_module.\n");
1220 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1221 adev->dm.freesync_module);
1223 amdgpu_dm_init_color_mod();
1225 #if defined(CONFIG_DRM_AMD_DC_DCN)
1226 if (adev->dm.dc->caps.max_links > 0) {
1227 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1229 if (!adev->dm.vblank_workqueue)
1230 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1232 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1236 #ifdef CONFIG_DRM_AMD_DC_HDCP
1237 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1238 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1240 if (!adev->dm.hdcp_workqueue)
1241 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1243 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1245 dc_init_callbacks(adev->dm.dc, &init_params);
1248 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1249 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1251 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1252 init_completion(&adev->dm.dmub_aux_transfer_done);
1253 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1254 if (!adev->dm.dmub_notify) {
1255 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1258 amdgpu_dm_outbox_init(adev);
1261 if (amdgpu_dm_initialize_drm_device(adev)) {
1263 "amdgpu: failed to initialize sw for display support.\n");
1267 /* create fake encoders for MST */
1268 dm_dp_create_fake_mst_encoders(adev);
1270 /* TODO: Add_display_info? */
1272 /* TODO use dynamic cursor width */
1273 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1274 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1276 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1278 "amdgpu: failed to initialize sw for display support.\n");
1283 DRM_DEBUG_DRIVER("KMS initialized.\n");
1287 amdgpu_dm_fini(adev);
1292 static int amdgpu_dm_early_fini(void *handle)
1294 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1296 amdgpu_dm_audio_fini(adev);
1301 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1305 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1306 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1309 amdgpu_dm_destroy_drm_device(&adev->dm);
1311 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1312 if (adev->dm.crc_rd_wrk) {
1313 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1314 kfree(adev->dm.crc_rd_wrk);
1315 adev->dm.crc_rd_wrk = NULL;
1318 #ifdef CONFIG_DRM_AMD_DC_HDCP
1319 if (adev->dm.hdcp_workqueue) {
1320 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1321 adev->dm.hdcp_workqueue = NULL;
1325 dc_deinit_callbacks(adev->dm.dc);
1328 #if defined(CONFIG_DRM_AMD_DC_DCN)
1329 if (adev->dm.vblank_workqueue) {
1330 adev->dm.vblank_workqueue->dm = NULL;
1331 kfree(adev->dm.vblank_workqueue);
1332 adev->dm.vblank_workqueue = NULL;
1336 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1338 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1339 kfree(adev->dm.dmub_notify);
1340 adev->dm.dmub_notify = NULL;
1343 if (adev->dm.dmub_bo)
1344 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1345 &adev->dm.dmub_bo_gpu_addr,
1346 &adev->dm.dmub_bo_cpu_addr);
1348 /* DC Destroy TODO: Replace destroy DAL */
1350 dc_destroy(&adev->dm.dc);
1352 * TODO: pageflip, vblank interrupt
1354 * amdgpu_dm_irq_fini(adev);
1357 if (adev->dm.cgs_device) {
1358 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1359 adev->dm.cgs_device = NULL;
1361 if (adev->dm.freesync_module) {
1362 mod_freesync_destroy(adev->dm.freesync_module);
1363 adev->dm.freesync_module = NULL;
1366 mutex_destroy(&adev->dm.audio_lock);
1367 mutex_destroy(&adev->dm.dc_lock);
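/*
 * Pick and request the DMCU firmware for this ASIC and register it with the
 * PSP front-door loader; DMCU firmware is optional, so a missing file is not
 * an error.
 */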
1372 static int load_dmcu_fw(struct amdgpu_device *adev)
1374 const char *fw_name_dmcu = NULL;
1376 const struct dmcu_firmware_header_v1_0 *hdr;
1378 switch (adev->asic_type) {
1379 #if defined(CONFIG_DRM_AMD_DC_SI)
1394 case CHIP_POLARIS11:
1395 case CHIP_POLARIS10:
1396 case CHIP_POLARIS12:
1404 case CHIP_SIENNA_CICHLID:
1405 case CHIP_NAVY_FLOUNDER:
1406 case CHIP_DIMGREY_CAVEFISH:
1407 case CHIP_BEIGE_GOBY:
1409 case CHIP_YELLOW_CARP:
1412 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1415 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1416 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1417 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1418 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1423 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1427 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1428 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1432 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1434 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1435 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1436 adev->dm.fw_dmcu = NULL;
1440 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1445 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1447 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1449 release_firmware(adev->dm.fw_dmcu);
1450 adev->dm.fw_dmcu = NULL;
1454 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1455 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1456 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1457 adev->firmware.fw_size +=
1458 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1460 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1461 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1462 adev->firmware.fw_size +=
1463 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1465 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1467 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1472 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1474 struct amdgpu_device *adev = ctx;
1476 return dm_read_reg(adev->dm.dc->ctx, address);
1479 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1482 struct amdgpu_device *adev = ctx;
1484 return dm_write_reg(adev->dm.dc->ctx, address, value);
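/*
 * Software-side DMUB setup: request the per-ASIC DMUB firmware, create the
 * DMUB service and allocate the framebuffer regions it requires.
 */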
1487 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1489 struct dmub_srv_create_params create_params;
1490 struct dmub_srv_region_params region_params;
1491 struct dmub_srv_region_info region_info;
1492 struct dmub_srv_fb_params fb_params;
1493 struct dmub_srv_fb_info *fb_info;
1494 struct dmub_srv *dmub_srv;
1495 const struct dmcub_firmware_header_v1_0 *hdr;
1496 const char *fw_name_dmub;
1497 enum dmub_asic dmub_asic;
1498 enum dmub_status status;
1501 switch (adev->asic_type) {
1503 dmub_asic = DMUB_ASIC_DCN21;
1504 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1505 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1506 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1508 case CHIP_SIENNA_CICHLID:
1509 dmub_asic = DMUB_ASIC_DCN30;
1510 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1512 case CHIP_NAVY_FLOUNDER:
1513 dmub_asic = DMUB_ASIC_DCN30;
1514 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1517 dmub_asic = DMUB_ASIC_DCN301;
1518 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1520 case CHIP_DIMGREY_CAVEFISH:
1521 dmub_asic = DMUB_ASIC_DCN302;
1522 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1524 case CHIP_BEIGE_GOBY:
1525 dmub_asic = DMUB_ASIC_DCN303;
1526 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1528 case CHIP_YELLOW_CARP:
1529 dmub_asic = DMUB_ASIC_DCN31;
1530 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1534 /* ASIC doesn't support DMUB. */
1538 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1540 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1544 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1546 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1550 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1552 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1553 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1554 AMDGPU_UCODE_ID_DMCUB;
1555 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1557 adev->firmware.fw_size +=
1558 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1560 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1561 adev->dm.dmcub_fw_version);
1564 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1566 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1567 dmub_srv = adev->dm.dmub_srv;
1570 DRM_ERROR("Failed to allocate DMUB service!\n");
1574 memset(&create_params, 0, sizeof(create_params));
1575 create_params.user_ctx = adev;
1576 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1577 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1578 create_params.asic = dmub_asic;
1580 /* Create the DMUB service. */
1581 status = dmub_srv_create(dmub_srv, &create_params);
1582 if (status != DMUB_STATUS_OK) {
1583 DRM_ERROR("Error creating DMUB service: %d\n", status);
1587 /* Calculate the size of all the regions for the DMUB service. */
1588 memset(&region_params, 0, sizeof(region_params));
1590 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1591 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1592 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1593 region_params.vbios_size = adev->bios_size;
1594 region_params.fw_bss_data = region_params.bss_data_size ?
1595 adev->dm.dmub_fw->data +
1596 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1597 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1598 region_params.fw_inst_const =
1599 adev->dm.dmub_fw->data +
1600 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1603 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1606 if (status != DMUB_STATUS_OK) {
1607 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1612 * Allocate a framebuffer based on the total size of all the regions.
1613 * TODO: Move this into GART.
1615 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1616 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1617 &adev->dm.dmub_bo_gpu_addr,
1618 &adev->dm.dmub_bo_cpu_addr);
1622 /* Rebase the regions on the framebuffer address. */
1623 memset(&fb_params, 0, sizeof(fb_params));
1624 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1625 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1626 fb_params.region_info = &region_info;
1628 adev->dm.dmub_fb_info =
1629 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1630 fb_info = adev->dm.dmub_fb_info;
1634 "Failed to allocate framebuffer info for DMUB service!\n");
1638 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1639 if (status != DMUB_STATUS_OK) {
1640 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1647 static int dm_sw_init(void *handle)
1649 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1652 r = dm_dmub_sw_init(adev);
1656 return load_dmcu_fw(adev);
1659 static int dm_sw_fini(void *handle)
1661 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1663 kfree(adev->dm.dmub_fb_info);
1664 adev->dm.dmub_fb_info = NULL;
1666 if (adev->dm.dmub_srv) {
1667 dmub_srv_destroy(adev->dm.dmub_srv);
1668 adev->dm.dmub_srv = NULL;
1671 release_firmware(adev->dm.dmub_fw);
1672 adev->dm.dmub_fw = NULL;
1674 release_firmware(adev->dm.fw_dmcu);
1675 adev->dm.fw_dmcu = NULL;
1680 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1682 struct amdgpu_dm_connector *aconnector;
1683 struct drm_connector *connector;
1684 struct drm_connector_list_iter iter;
1687 drm_connector_list_iter_begin(dev, &iter);
1688 drm_for_each_connector_iter(connector, &iter) {
1689 aconnector = to_amdgpu_dm_connector(connector);
1690 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1691 aconnector->mst_mgr.aux) {
1692 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1694 aconnector->base.base.id);
1696 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1698 DRM_ERROR("DM_MST: Failed to start MST\n");
1699 aconnector->dc_link->type =
1700 dc_connection_single;
1705 drm_connector_list_iter_end(&iter);
1710 static int dm_late_init(void *handle)
1712 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1714 struct dmcu_iram_parameters params;
1715 unsigned int linear_lut[16];
1717 struct dmcu *dmcu = NULL;
1719 dmcu = adev->dm.dc->res_pool->dmcu;
1721 for (i = 0; i < 16; i++)
1722 linear_lut[i] = 0xFFFF * i / 15;
1725 params.backlight_ramping_start = 0xCCCC;
1726 params.backlight_ramping_reduction = 0xCCCCCCCC;
1727 params.backlight_lut_array_size = 16;
1728 params.backlight_lut_array = linear_lut;
1730 /* Min backlight level after ABM reduction; don't allow below 1%
1731 * 0xFFFF x 0.01 = 0x28F
1733 params.min_abm_backlight = 0x28F;
1734 /* In the case where abm is implemented on dmcub,
1735 * dmcu object will be null.
1736 * ABM 2.4 and up are implemented on dmcub.
1739 if (!dmcu_load_iram(dmcu, params))
1741 } else if (adev->dm.dc->ctx->dmub_srv) {
1742 struct dc_link *edp_links[MAX_NUM_EDP];
1745 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1746 for (i = 0; i < edp_num; i++) {
1747 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1752 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1755 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1757 struct amdgpu_dm_connector *aconnector;
1758 struct drm_connector *connector;
1759 struct drm_connector_list_iter iter;
1760 struct drm_dp_mst_topology_mgr *mgr;
1762 bool need_hotplug = false;
1764 drm_connector_list_iter_begin(dev, &iter);
1765 drm_for_each_connector_iter(connector, &iter) {
1766 aconnector = to_amdgpu_dm_connector(connector);
1767 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1768 aconnector->mst_port)
1771 mgr = &aconnector->mst_mgr;
1774 drm_dp_mst_topology_mgr_suspend(mgr);
1776 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1778 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1779 need_hotplug = true;
1783 drm_connector_list_iter_end(&iter);
1786 drm_kms_helper_hotplug_event(dev);
1789 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1791 struct smu_context *smu = &adev->smu;
1794 if (!is_support_sw_smu(adev))
1797 * This interface is for dGPU Navi1x. Linux dc-pplib interface depends
1798 * on window driver dc implementation.
1799 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1800 * should be passed to smu during boot up and resume from s3.
1801 * boot up: dc calculate dcn watermark clock settings within dc_create,
1802 * dcn20_resource_construct
1803 * then call pplib functions below to pass the settings to smu:
1804 * smu_set_watermarks_for_clock_ranges
1805 * smu_set_watermarks_table
1806 * navi10_set_watermarks_table
1807 * smu_write_watermarks_table
1809 * For Renoir, clock settings of dcn watermark are also fixed values.
1810 * dc has implemented different flow for window driver:
1811 * dc_hardware_init / dc_set_power_state
1816 * smu_set_watermarks_for_clock_ranges
1817 * renoir_set_watermarks_table
1818 * smu_write_watermarks_table
1821 * dc_hardware_init -> amdgpu_dm_init
1822 * dc_set_power_state --> dm_resume
1824 * therefore, this function applies to navi10/12/14 but not Renoir
1827 switch (adev->asic_type) {
1836 ret = smu_write_watermarks_table(smu);
1838 DRM_ERROR("Failed to update WMTABLE!\n");
1846 * dm_hw_init() - Initialize DC device
1847 * @handle: The base driver device containing the amdgpu_dm device.
1849 * Initialize the &struct amdgpu_display_manager device. This involves calling
1850 * the initializers of each DM component, then populating the struct with them.
1852 * Although the function implies hardware initialization, both hardware and
1853 * software are initialized here. Splitting them out to their relevant init
1854 * hooks is a future TODO item.
1856 * Some notable things that are initialized here:
1858 * - Display Core, both software and hardware
1859 * - DC modules that we need (freesync and color management)
1860 * - DRM software states
1861 * - Interrupt sources and handlers
1863 * - Debug FS entries, if enabled
1865 static int dm_hw_init(void *handle)
1867 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1868 /* Create DAL display manager */
1869 amdgpu_dm_init(adev);
1870 amdgpu_dm_hpd_init(adev);
1876 * dm_hw_fini() - Teardown DC device
1877 * @handle: The base driver device containing the amdgpu_dm device.
1879 * Teardown components within &struct amdgpu_display_manager that require
1880 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1881 * were loaded. Also flush IRQ workqueues and disable them.
1883 static int dm_hw_fini(void *handle)
1885 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1887 amdgpu_dm_hpd_fini(adev);
1889 amdgpu_dm_irq_fini(adev);
1890 amdgpu_dm_fini(adev);
1895 static int dm_enable_vblank(struct drm_crtc *crtc);
1896 static void dm_disable_vblank(struct drm_crtc *crtc);
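/*
 * Enable or disable pageflip and vblank interrupts for every stream in the
 * given DC state; used while suspending and resuming around a GPU reset.
 */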
1898 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1899 struct dc_state *state, bool enable)
1901 enum dc_irq_source irq_source;
1902 struct amdgpu_crtc *acrtc;
1906 for (i = 0; i < state->stream_count; i++) {
1907 acrtc = get_crtc_by_otg_inst(
1908 adev, state->stream_status[i].primary_otg_inst);
1910 if (acrtc && state->stream_status[i].plane_count != 0) {
1911 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1912 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1913 DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1914 acrtc->crtc_id, enable ? "en" : "dis", rc);
1916 DRM_WARN("Failed to %s pflip interrupts\n",
1917 enable ? "enable" : "disable");
1920 rc = dm_enable_vblank(&acrtc->base);
1922 DRM_WARN("Failed to enable vblank interrupts\n");
1924 dm_disable_vblank(&acrtc->base);
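/*
 * Commit a DC state with all streams and their planes removed, quiescing the
 * display hardware (used on suspend during GPU reset).
 */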
1932 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1934 struct dc_state *context = NULL;
1935 enum dc_status res = DC_ERROR_UNEXPECTED;
1937 struct dc_stream_state *del_streams[MAX_PIPES];
1938 int del_streams_count = 0;
1940 memset(del_streams, 0, sizeof(del_streams));
1942 context = dc_create_state(dc);
1943 if (context == NULL)
1944 goto context_alloc_fail;
1946 dc_resource_state_copy_construct_current(dc, context);
1948 /* First remove from context all streams */
1949 for (i = 0; i < context->stream_count; i++) {
1950 struct dc_stream_state *stream = context->streams[i];
1952 del_streams[del_streams_count++] = stream;
1955 /* Remove all planes for removed streams and then remove the streams */
1956 for (i = 0; i < del_streams_count; i++) {
1957 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1958 res = DC_FAIL_DETACH_SURFACES;
1962 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1968 res = dc_validate_global_state(dc, context, false);
1971 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1975 res = dc_commit_state(dc, context);
1978 dc_release_state(context);
1984 static int dm_suspend(void *handle)
1986 struct amdgpu_device *adev = handle;
1987 struct amdgpu_display_manager *dm = &adev->dm;
1990 if (amdgpu_in_reset(adev)) {
1991 mutex_lock(&dm->dc_lock);
1993 #if defined(CONFIG_DRM_AMD_DC_DCN)
1994 dc_allow_idle_optimizations(adev->dm.dc, false);
1997 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1999 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2001 amdgpu_dm_commit_zero_streams(dm->dc);
2003 amdgpu_dm_irq_suspend(adev);
2008 WARN_ON(adev->dm.cached_state);
2009 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2011 s3_handle_mst(adev_to_drm(adev), true);
2013 amdgpu_dm_irq_suspend(adev);
2015 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2020 static struct amdgpu_dm_connector *
2021 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2022 struct drm_crtc *crtc)
2025 struct drm_connector_state *new_con_state;
2026 struct drm_connector *connector;
2027 struct drm_crtc *crtc_from_state;
2029 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2030 crtc_from_state = new_con_state->crtc;
2032 if (crtc_from_state == crtc)
2033 return to_amdgpu_dm_connector(connector);
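/*
 * Emulate link detection for the connector's signal type: create a sink with
 * the matching capabilities and read the EDID locally instead of performing
 * real detection on the link.
 */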
2039 static void emulated_link_detect(struct dc_link *link)
2041 struct dc_sink_init_data sink_init_data = { 0 };
2042 struct display_sink_capability sink_caps = { 0 };
2043 enum dc_edid_status edid_status;
2044 struct dc_context *dc_ctx = link->ctx;
2045 struct dc_sink *sink = NULL;
2046 struct dc_sink *prev_sink = NULL;
2048 link->type = dc_connection_none;
2049 prev_sink = link->local_sink;
2052 dc_sink_release(prev_sink);
2054 switch (link->connector_signal) {
2055 case SIGNAL_TYPE_HDMI_TYPE_A: {
2056 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2057 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2061 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2062 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2063 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2067 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2068 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2069 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2073 case SIGNAL_TYPE_LVDS: {
2074 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2075 sink_caps.signal = SIGNAL_TYPE_LVDS;
2079 case SIGNAL_TYPE_EDP: {
2080 sink_caps.transaction_type =
2081 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2082 sink_caps.signal = SIGNAL_TYPE_EDP;
2086 case SIGNAL_TYPE_DISPLAY_PORT: {
2087 sink_caps.transaction_type =
2088 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2089 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2094 DC_ERROR("Invalid connector type! signal:%d\n",
2095 link->connector_signal);
2099 sink_init_data.link = link;
2100 sink_init_data.sink_signal = sink_caps.signal;
2102 sink = dc_sink_create(&sink_init_data);
2104 DC_ERROR("Failed to create sink!\n");
2108 /* dc_sink_create returns a new reference */
2109 link->local_sink = sink;
2111 edid_status = dm_helpers_read_local_edid(
2116 if (edid_status != EDID_OK)
2117 DC_ERROR("Failed to read EDID");
2121 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2122 struct amdgpu_display_manager *dm)
2125 struct dc_surface_update surface_updates[MAX_SURFACES];
2126 struct dc_plane_info plane_infos[MAX_SURFACES];
2127 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2128 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2129 struct dc_stream_update stream_update;
2133 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2136 dm_error("Failed to allocate update bundle\n");
2140 for (k = 0; k < dc_state->stream_count; k++) {
2141 bundle->stream_update.stream = dc_state->streams[k];
2143 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2144 bundle->surface_updates[m].surface =
2145 dc_state->stream_status->plane_states[m];
2146 bundle->surface_updates[m].surface->force_full_update =
2149 dc_commit_updates_for_stream(
2150 dm->dc, bundle->surface_updates,
2151 dc_state->stream_status->plane_count,
2152 dc_state->streams[k], &bundle->stream_update, dc_state);
2161 static void dm_set_dpms_off(struct dc_link *link)
2163 struct dc_stream_state *stream_state;
2164 struct amdgpu_dm_connector *aconnector = link->priv;
2165 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2166 struct dc_stream_update stream_update;
2167 bool dpms_off = true;
2169 memset(&stream_update, 0, sizeof(stream_update));
2170 stream_update.dpms_off = &dpms_off;
2172 mutex_lock(&adev->dm.dc_lock);
2173 stream_state = dc_stream_find_from_link(link);
2175 if (stream_state == NULL) {
2176 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2177 mutex_unlock(&adev->dm.dc_lock);
2181 stream_update.stream = stream_state;
2182 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2183 stream_state, &stream_update,
2184 stream_state->ctx->dc->current_state);
2185 mutex_unlock(&adev->dm.dc_lock);
2188 static int dm_resume(void *handle)
2190 struct amdgpu_device *adev = handle;
2191 struct drm_device *ddev = adev_to_drm(adev);
2192 struct amdgpu_display_manager *dm = &adev->dm;
2193 struct amdgpu_dm_connector *aconnector;
2194 struct drm_connector *connector;
2195 struct drm_connector_list_iter iter;
2196 struct drm_crtc *crtc;
2197 struct drm_crtc_state *new_crtc_state;
2198 struct dm_crtc_state *dm_new_crtc_state;
2199 struct drm_plane *plane;
2200 struct drm_plane_state *new_plane_state;
2201 struct dm_plane_state *dm_new_plane_state;
2202 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2203 enum dc_connection_type new_connection_type = dc_connection_none;
2204 struct dc_state *dc_state;
2207 if (amdgpu_in_reset(adev)) {
2208 dc_state = dm->cached_dc_state;
2210 r = dm_dmub_hw_init(adev);
2212 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2214 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2217 amdgpu_dm_irq_resume_early(adev);
2219 for (i = 0; i < dc_state->stream_count; i++) {
2220 dc_state->streams[i]->mode_changed = true;
2221 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2222 dc_state->stream_status->plane_states[j]->update_flags.raw
2226 #if defined(CONFIG_DRM_AMD_DC_DCN)
2228 * Resource allocation happens for link encoders on newer ASICs in
2229 * dc_validate_global_state, so we need to revalidate it.
2231 * This shouldn't fail (it passed once before), so warn if it does.
2233 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2236 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2238 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2240 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2242 dc_release_state(dm->cached_dc_state);
2243 dm->cached_dc_state = NULL;
2245 amdgpu_dm_irq_resume_late(adev);
2247 mutex_unlock(&dm->dc_lock);
2251 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2252 dc_release_state(dm_state->context);
2253 dm_state->context = dc_create_state(dm->dc);
2254 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2255 dc_resource_state_construct(dm->dc, dm_state->context);
2257 /* Before powering on DC we need to re-initialize DMUB. */
2258 r = dm_dmub_hw_init(adev);
2260 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2262 /* power on hardware */
2263 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2265 /* program HPD filter */
2269 * Early-enable the HPD Rx IRQ; this should be done before setting the mode, as
2270 * short pulse interrupts are used for MST
2272 amdgpu_dm_irq_resume_early(adev);
2274 /* On resume we need to rewrite the MSTM control bits to enable MST */
2275 s3_handle_mst(ddev, false);
2278 drm_connector_list_iter_begin(ddev, &iter);
2279 drm_for_each_connector_iter(connector, &iter) {
2280 aconnector = to_amdgpu_dm_connector(connector);
2283 * this is the case when traversing through already created MST
2284 * connectors; they should be skipped
2286 if (aconnector->mst_port)
2289 mutex_lock(&aconnector->hpd_lock);
2290 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2291 DRM_ERROR("KMS: Failed to detect connector\n");
2293 if (aconnector->base.force && new_connection_type == dc_connection_none)
2294 emulated_link_detect(aconnector->dc_link);
2296 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2298 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2299 aconnector->fake_enable = false;
2301 if (aconnector->dc_sink)
2302 dc_sink_release(aconnector->dc_sink);
2303 aconnector->dc_sink = NULL;
2304 amdgpu_dm_update_connector_after_detect(aconnector);
2305 mutex_unlock(&aconnector->hpd_lock);
2307 drm_connector_list_iter_end(&iter);
2309 /* Force mode set in atomic commit */
2310 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2311 new_crtc_state->active_changed = true;
2314 * atomic_check is expected to create the dc states. We need to release
2315 * them here, since they were duplicated as part of the suspend
2318 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2319 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2320 if (dm_new_crtc_state->stream) {
2321 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2322 dc_stream_release(dm_new_crtc_state->stream);
2323 dm_new_crtc_state->stream = NULL;
2327 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2328 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2329 if (dm_new_plane_state->dc_state) {
2330 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2331 dc_plane_state_release(dm_new_plane_state->dc_state);
2332 dm_new_plane_state->dc_state = NULL;
2336 drm_atomic_helper_resume(ddev, dm->cached_state);
2338 dm->cached_state = NULL;
2340 amdgpu_dm_irq_resume_late(adev);
2342 amdgpu_dm_smu_write_watermarks_table(adev);
2350 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2351 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2352 * the base driver's device list to be initialized and torn down accordingly.
2354 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2357 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2359 .early_init = dm_early_init,
2360 .late_init = dm_late_init,
2361 .sw_init = dm_sw_init,
2362 .sw_fini = dm_sw_fini,
2363 .early_fini = amdgpu_dm_early_fini,
2364 .hw_init = dm_hw_init,
2365 .hw_fini = dm_hw_fini,
2366 .suspend = dm_suspend,
2367 .resume = dm_resume,
2368 .is_idle = dm_is_idle,
2369 .wait_for_idle = dm_wait_for_idle,
2370 .check_soft_reset = dm_check_soft_reset,
2371 .soft_reset = dm_soft_reset,
2372 .set_clockgating_state = dm_set_clockgating_state,
2373 .set_powergating_state = dm_set_powergating_state,
2376 const struct amdgpu_ip_block_version dm_ip_block =
2378 .type = AMD_IP_BLOCK_TYPE_DCE,
2382 .funcs = &amdgpu_dm_funcs,
2392 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2393 .fb_create = amdgpu_display_user_framebuffer_create,
2394 .get_format_info = amd_get_format_info,
2395 .output_poll_changed = drm_fb_helper_output_poll_changed,
2396 .atomic_check = amdgpu_dm_atomic_check,
2397 .atomic_commit = drm_atomic_helper_commit,
2400 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2401 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2404 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2406 u32 max_cll, min_cll, max, min, q, r;
2407 struct amdgpu_dm_backlight_caps *caps;
2408 struct amdgpu_display_manager *dm;
2409 struct drm_connector *conn_base;
2410 struct amdgpu_device *adev;
2411 struct dc_link *link = NULL;
2412 static const u8 pre_computed_values[] = {
2413 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2414 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2416 if (!aconnector || !aconnector->dc_link)
2419 link = aconnector->dc_link;
2420 if (link->connector_signal != SIGNAL_TYPE_EDP)
2423 conn_base = &aconnector->base;
2424 adev = drm_to_adev(conn_base->dev);
2426 caps = &dm->backlight_caps;
2427 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2428 caps->aux_support = false;
2429 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2430 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2432 if (caps->ext_caps->bits.oled == 1 /*||
2433 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2434 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2435 caps->aux_support = true;
2437 if (amdgpu_backlight == 0)
2438 caps->aux_support = false;
2439 else if (amdgpu_backlight == 1)
2440 caps->aux_support = true;
2442 /* From the specification (CTA-861-G), for calculating the maximum
2443 * luminance we need to use:
2444 * Luminance = 50*2**(CV/32)
2445 * Where CV is a one-byte value.
2446 * For calculating this expression we may need floating point precision;
2447 * to avoid that complexity, we take advantage of the fact that CV is divided
2448 * by a constant. From Euclid's division algorithm, we know that CV
2449 * can be written as: CV = 32*q + r. Next, we replace CV in the
2450 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2451 * need to pre-compute the value of r/32. For pre-computing the values
2452 * we just used the following Ruby line:
2453 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2454 * The results of the above expressions can be verified at
2455 * pre_computed_values.
2459 max = (1 << q) * pre_computed_values[r];
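/*
 * Illustrative example of the computation above: max_cll = 100 decomposes
 * as CV = 32*3 + 4, so q = 3, r = 4 and
 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440.
 */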
2461 // min luminance: maxLum * (CV/255)^2 / 100
2462 q = DIV_ROUND_CLOSEST(min_cll, 255);
2463 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2465 caps->aux_max_input_signal = max;
2466 caps->aux_min_input_signal = min;
2469 void amdgpu_dm_update_connector_after_detect(
2470 struct amdgpu_dm_connector *aconnector)
2472 struct drm_connector *connector = &aconnector->base;
2473 struct drm_device *dev = connector->dev;
2474 struct dc_sink *sink;
2476 /* MST handled by drm_mst framework */
2477 if (aconnector->mst_mgr.mst_state == true)
2480 sink = aconnector->dc_link->local_sink;
2482 dc_sink_retain(sink);
2485 * An EDID-managed connector gets its first update only in the mode_valid hook, and then
2486 * the connector sink is set to either a fake or a physical sink depending on the link status.
2487 * Skip if already done during boot.
2489 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2490 && aconnector->dc_em_sink) {
2493 * For S3 resume with headless use eml_sink to fake stream
2494 * because on resume connector->sink is set to NULL
2496 mutex_lock(&dev->mode_config.mutex);
2499 if (aconnector->dc_sink) {
2500 amdgpu_dm_update_freesync_caps(connector, NULL);
2502 * retain and release below are used to
2503 * bump up the refcount for the sink because the link doesn't point
2504 * to it anymore after disconnect; otherwise, on the next crtc-to-connector
2505 * reshuffle by the UMD we would get an unwanted dc_sink release
2507 dc_sink_release(aconnector->dc_sink);
2509 aconnector->dc_sink = sink;
2510 dc_sink_retain(aconnector->dc_sink);
2511 amdgpu_dm_update_freesync_caps(connector,
2514 amdgpu_dm_update_freesync_caps(connector, NULL);
2515 if (!aconnector->dc_sink) {
2516 aconnector->dc_sink = aconnector->dc_em_sink;
2517 dc_sink_retain(aconnector->dc_sink);
2521 mutex_unlock(&dev->mode_config.mutex);
2524 dc_sink_release(sink);
2529 * TODO: temporary guard to look for proper fix
2530 * if this sink is MST sink, we should not do anything
2532 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2533 dc_sink_release(sink);
2537 if (aconnector->dc_sink == sink) {
2539 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2542 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2543 aconnector->connector_id);
2545 dc_sink_release(sink);
2549 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2550 aconnector->connector_id, aconnector->dc_sink, sink);
2552 mutex_lock(&dev->mode_config.mutex);
2555 * 1. Update status of the drm connector
2556 * 2. Send an event and let userspace tell us what to do
2560 * TODO: check if we still need the S3 mode update workaround.
2561 * If yes, put it here.
2563 if (aconnector->dc_sink) {
2564 amdgpu_dm_update_freesync_caps(connector, NULL);
2565 dc_sink_release(aconnector->dc_sink);
2568 aconnector->dc_sink = sink;
2569 dc_sink_retain(aconnector->dc_sink);
2570 if (sink->dc_edid.length == 0) {
2571 aconnector->edid = NULL;
2572 if (aconnector->dc_link->aux_mode) {
2573 drm_dp_cec_unset_edid(
2574 &aconnector->dm_dp_aux.aux);
2578 (struct edid *)sink->dc_edid.raw_edid;
2580 drm_connector_update_edid_property(connector,
2582 if (aconnector->dc_link->aux_mode)
2583 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2587 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2588 update_connector_ext_caps(aconnector);
2590 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2591 amdgpu_dm_update_freesync_caps(connector, NULL);
2592 drm_connector_update_edid_property(connector, NULL);
2593 aconnector->num_modes = 0;
2594 dc_sink_release(aconnector->dc_sink);
2595 aconnector->dc_sink = NULL;
2596 aconnector->edid = NULL;
2597 #ifdef CONFIG_DRM_AMD_DC_HDCP
2598 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2599 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2600 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2604 mutex_unlock(&dev->mode_config.mutex);
2606 update_subconnector_property(aconnector);
2609 dc_sink_release(sink);
2612 static void handle_hpd_irq(void *param)
2614 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2615 struct drm_connector *connector = &aconnector->base;
2616 struct drm_device *dev = connector->dev;
2617 enum dc_connection_type new_connection_type = dc_connection_none;
2618 struct amdgpu_device *adev = drm_to_adev(dev);
2619 #ifdef CONFIG_DRM_AMD_DC_HDCP
2620 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2623 if (adev->dm.disable_hpd_irq)
2627 * In case of failure, or for MST, there is no need to update the connector status
2628 * or notify the OS, since (in the MST case) MST does this in its own context.
2630 mutex_lock(&aconnector->hpd_lock);
2632 #ifdef CONFIG_DRM_AMD_DC_HDCP
2633 if (adev->dm.hdcp_workqueue) {
2634 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2635 dm_con_state->update_hdcp = true;
2638 if (aconnector->fake_enable)
2639 aconnector->fake_enable = false;
2641 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2642 DRM_ERROR("KMS: Failed to detect connector\n");
2644 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2645 emulated_link_detect(aconnector->dc_link);
2648 drm_modeset_lock_all(dev);
2649 dm_restore_drm_connector_state(dev, connector);
2650 drm_modeset_unlock_all(dev);
2652 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2653 drm_kms_helper_hotplug_event(dev);
2655 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2656 if (new_connection_type == dc_connection_none &&
2657 aconnector->dc_link->type == dc_connection_none)
2658 dm_set_dpms_off(aconnector->dc_link);
2660 amdgpu_dm_update_connector_after_detect(aconnector);
2662 drm_modeset_lock_all(dev);
2663 dm_restore_drm_connector_state(dev, connector);
2664 drm_modeset_unlock_all(dev);
2666 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2667 drm_kms_helper_hotplug_event(dev);
2669 mutex_unlock(&aconnector->hpd_lock);
2673 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2675 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2677 bool new_irq_handled = false;
2679 int dpcd_bytes_to_read;
2681 const int max_process_count = 30;
2682 int process_count = 0;
2684 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2686 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2687 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2688 /* DPCD 0x200 - 0x201 for downstream IRQ */
2689 dpcd_addr = DP_SINK_COUNT;
2691 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2692 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2693 dpcd_addr = DP_SINK_COUNT_ESI;
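/*
 * i.e. sinks with DPCD rev >= 1.2 expose the ESI block at 0x2002, while
 * older sinks only provide the legacy block at 0x200 (see the rev check
 * above).
 */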
2696 dret = drm_dp_dpcd_read(
2697 &aconnector->dm_dp_aux.aux,
2700 dpcd_bytes_to_read);
2702 while (dret == dpcd_bytes_to_read &&
2703 process_count < max_process_count) {
2709 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2710 /* handle HPD short pulse irq */
2711 if (aconnector->mst_mgr.mst_state)
2713 &aconnector->mst_mgr,
2717 if (new_irq_handled) {
2718 /* ACK at DPCD to notify downstream */
2719 const int ack_dpcd_bytes_to_write =
2720 dpcd_bytes_to_read - 1;
2722 for (retry = 0; retry < 3; retry++) {
2725 wret = drm_dp_dpcd_write(
2726 &aconnector->dm_dp_aux.aux,
2729 ack_dpcd_bytes_to_write);
2730 if (wret == ack_dpcd_bytes_to_write)
2734 /* check if there is new irq to be handled */
2735 dret = drm_dp_dpcd_read(
2736 &aconnector->dm_dp_aux.aux,
2739 dpcd_bytes_to_read);
2741 new_irq_handled = false;
2747 if (process_count == max_process_count)
2748 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2751 static void handle_hpd_rx_irq(void *param)
2753 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2754 struct drm_connector *connector = &aconnector->base;
2755 struct drm_device *dev = connector->dev;
2756 struct dc_link *dc_link = aconnector->dc_link;
2757 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2758 bool result = false;
2759 enum dc_connection_type new_connection_type = dc_connection_none;
2760 struct amdgpu_device *adev = drm_to_adev(dev);
2761 union hpd_irq_data hpd_irq_data;
2764 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2766 if (adev->dm.disable_hpd_irq)
2771 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
2772 * conflict; after the i2c helper is implemented, this mutex should be
2775 mutex_lock(&aconnector->hpd_lock);
2777 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2779 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2780 (dc_link->type == dc_connection_mst_branch)) {
2781 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2783 dm_handle_hpd_rx_irq(aconnector);
2785 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2787 dm_handle_hpd_rx_irq(aconnector);
2793 * TODO: We need the lock to avoid touching DC state while it's being
2794 * modified during automated compliance testing, or when link loss
2795 * happens. While this should be split into subhandlers and proper
2796 * interfaces to avoid having to conditionally lock like this in the
2797 * outer layer, we need this workaround temporarily to allow MST
2798 * lightup in some scenarios to avoid timeout.
2800 if (!amdgpu_in_reset(adev) &&
2801 (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2802 hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2803 mutex_lock(&adev->dm.dc_lock);
2807 #ifdef CONFIG_DRM_AMD_DC_HDCP
2808 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2810 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2812 if (!amdgpu_in_reset(adev) && lock_flag)
2813 mutex_unlock(&adev->dm.dc_lock);
2816 if (result && !is_mst_root_connector) {
2817 /* Downstream Port status changed. */
2818 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2819 DRM_ERROR("KMS: Failed to detect connector\n");
2821 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2822 emulated_link_detect(dc_link);
2824 if (aconnector->fake_enable)
2825 aconnector->fake_enable = false;
2827 amdgpu_dm_update_connector_after_detect(aconnector);
2830 drm_modeset_lock_all(dev);
2831 dm_restore_drm_connector_state(dev, connector);
2832 drm_modeset_unlock_all(dev);
2834 drm_kms_helper_hotplug_event(dev);
2835 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2837 if (aconnector->fake_enable)
2838 aconnector->fake_enable = false;
2840 amdgpu_dm_update_connector_after_detect(aconnector);
2843 drm_modeset_lock_all(dev);
2844 dm_restore_drm_connector_state(dev, connector);
2845 drm_modeset_unlock_all(dev);
2847 drm_kms_helper_hotplug_event(dev);
2850 #ifdef CONFIG_DRM_AMD_DC_HDCP
2851 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2852 if (adev->dm.hdcp_workqueue)
2853 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2857 if (dc_link->type != dc_connection_mst_branch)
2858 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2860 mutex_unlock(&aconnector->hpd_lock);
2863 static void register_hpd_handlers(struct amdgpu_device *adev)
2865 struct drm_device *dev = adev_to_drm(adev);
2866 struct drm_connector *connector;
2867 struct amdgpu_dm_connector *aconnector;
2868 const struct dc_link *dc_link;
2869 struct dc_interrupt_params int_params = {0};
2871 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2872 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2874 list_for_each_entry(connector,
2875 &dev->mode_config.connector_list, head) {
2877 aconnector = to_amdgpu_dm_connector(connector);
2878 dc_link = aconnector->dc_link;
2880 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2881 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2882 int_params.irq_source = dc_link->irq_source_hpd;
2884 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2886 (void *) aconnector);
2889 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2891 /* Also register for DP short pulse (hpd_rx). */
2892 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2893 int_params.irq_source = dc_link->irq_source_hpd_rx;
2895 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2897 (void *) aconnector);
2902 #if defined(CONFIG_DRM_AMD_DC_SI)
2903 /* Register IRQ sources and initialize IRQ callbacks */
2904 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2906 struct dc *dc = adev->dm.dc;
2907 struct common_irq_params *c_irq_params;
2908 struct dc_interrupt_params int_params = {0};
2911 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2913 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2917 * Actions of amdgpu_irq_add_id():
2918 * 1. Register a set() function with base driver.
2919 * Base driver will call set() function to enable/disable an
2920 * interrupt in DC hardware.
2921 * 2. Register amdgpu_dm_irq_handler().
2922 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923 * coming from DC hardware.
2924 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925 * for acknowledging and handling. */
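/*
 * Illustrative mapping for the VBLANK loop below: for i == 0, SRCID i + 1
 * is translated by dc_interrupt_to_irq_source() to (presumably)
 * DC_IRQ_SOURCE_VBLANK1, so vblank_params[0] is used and
 * dm_crtc_high_irq() is registered as its handler.
 */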
2927 /* Use VBLANK interrupt */
2928 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2929 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2931 DRM_ERROR("Failed to add crtc irq id!\n");
2935 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936 int_params.irq_source =
2937 dc_interrupt_to_irq_source(dc, i+1 , 0);
2939 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2941 c_irq_params->adev = adev;
2942 c_irq_params->irq_src = int_params.irq_source;
2944 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945 dm_crtc_high_irq, c_irq_params);
2948 /* Use GRPH_PFLIP interrupt */
2949 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2950 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2951 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2953 DRM_ERROR("Failed to add page flip irq id!\n");
2957 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2958 int_params.irq_source =
2959 dc_interrupt_to_irq_source(dc, i, 0);
2961 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2963 c_irq_params->adev = adev;
2964 c_irq_params->irq_src = int_params.irq_source;
2966 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2967 dm_pflip_high_irq, c_irq_params);
2972 r = amdgpu_irq_add_id(adev, client_id,
2973 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2975 DRM_ERROR("Failed to add hpd irq id!\n");
2979 register_hpd_handlers(adev);
2985 /* Register IRQ sources and initialize IRQ callbacks */
2986 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2988 struct dc *dc = adev->dm.dc;
2989 struct common_irq_params *c_irq_params;
2990 struct dc_interrupt_params int_params = {0};
2993 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2995 if (adev->asic_type >= CHIP_VEGA10)
2996 client_id = SOC15_IH_CLIENTID_DCE;
2998 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2999 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3002 * Actions of amdgpu_irq_add_id():
3003 * 1. Register a set() function with base driver.
3004 * Base driver will call set() function to enable/disable an
3005 * interrupt in DC hardware.
3006 * 2. Register amdgpu_dm_irq_handler().
3007 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3008 * coming from DC hardware.
3009 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3010 * for acknowledging and handling. */
3012 /* Use VBLANK interrupt */
3013 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3014 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3016 DRM_ERROR("Failed to add crtc irq id!\n");
3020 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3021 int_params.irq_source =
3022 dc_interrupt_to_irq_source(dc, i, 0);
3024 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3026 c_irq_params->adev = adev;
3027 c_irq_params->irq_src = int_params.irq_source;
3029 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3030 dm_crtc_high_irq, c_irq_params);
3033 /* Use VUPDATE interrupt */
3034 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3035 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3037 DRM_ERROR("Failed to add vupdate irq id!\n");
3041 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3042 int_params.irq_source =
3043 dc_interrupt_to_irq_source(dc, i, 0);
3045 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3047 c_irq_params->adev = adev;
3048 c_irq_params->irq_src = int_params.irq_source;
3050 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3051 dm_vupdate_high_irq, c_irq_params);
3054 /* Use GRPH_PFLIP interrupt */
3055 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3056 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3057 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3059 DRM_ERROR("Failed to add page flip irq id!\n");
3063 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3064 int_params.irq_source =
3065 dc_interrupt_to_irq_source(dc, i, 0);
3067 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3069 c_irq_params->adev = adev;
3070 c_irq_params->irq_src = int_params.irq_source;
3072 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3073 dm_pflip_high_irq, c_irq_params);
3078 r = amdgpu_irq_add_id(adev, client_id,
3079 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3081 DRM_ERROR("Failed to add hpd irq id!\n");
3085 register_hpd_handlers(adev);
3090 #if defined(CONFIG_DRM_AMD_DC_DCN)
3091 /* Register IRQ sources and initialize IRQ callbacks */
3092 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3094 struct dc *dc = adev->dm.dc;
3095 struct common_irq_params *c_irq_params;
3096 struct dc_interrupt_params int_params = {0};
3099 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3100 static const unsigned int vrtl_int_srcid[] = {
3101 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3102 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3103 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3104 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3105 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3106 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3110 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3111 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3114 * Actions of amdgpu_irq_add_id():
3115 * 1. Register a set() function with base driver.
3116 * Base driver will call set() function to enable/disable an
3117 * interrupt in DC hardware.
3118 * 2. Register amdgpu_dm_irq_handler().
3119 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3120 * coming from DC hardware.
3121 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3122 * for acknowledging and handling.
3125 /* Use VSTARTUP interrupt */
3126 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3127 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3129 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3132 DRM_ERROR("Failed to add crtc irq id!\n");
3136 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3137 int_params.irq_source =
3138 dc_interrupt_to_irq_source(dc, i, 0);
3140 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3142 c_irq_params->adev = adev;
3143 c_irq_params->irq_src = int_params.irq_source;
3145 amdgpu_dm_irq_register_interrupt(
3146 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3149 /* Use otg vertical line interrupt */
3150 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3151 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3152 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3153 vrtl_int_srcid[i], &adev->vline0_irq);
3156 DRM_ERROR("Failed to add vline0 irq id!\n");
3160 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3161 int_params.irq_source =
3162 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3164 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3165 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3169 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3170 - DC_IRQ_SOURCE_DC1_VLINE0];
3172 c_irq_params->adev = adev;
3173 c_irq_params->irq_src = int_params.irq_source;
3175 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3176 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3180 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3181 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3182 * to trigger at end of each vblank, regardless of state of the lock,
3183 * matching DCE behaviour.
3185 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3186 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3188 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3191 DRM_ERROR("Failed to add vupdate irq id!\n");
3195 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3196 int_params.irq_source =
3197 dc_interrupt_to_irq_source(dc, i, 0);
3199 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3201 c_irq_params->adev = adev;
3202 c_irq_params->irq_src = int_params.irq_source;
3204 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3205 dm_vupdate_high_irq, c_irq_params);
3208 /* Use GRPH_PFLIP interrupt */
3209 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3210 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3212 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3214 DRM_ERROR("Failed to add page flip irq id!\n");
3218 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3219 int_params.irq_source =
3220 dc_interrupt_to_irq_source(dc, i, 0);
3222 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3224 c_irq_params->adev = adev;
3225 c_irq_params->irq_src = int_params.irq_source;
3227 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3228 dm_pflip_high_irq, c_irq_params);
3233 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3236 DRM_ERROR("Failed to add hpd irq id!\n");
3240 register_hpd_handlers(adev);
3244 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3245 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3247 struct dc *dc = adev->dm.dc;
3248 struct common_irq_params *c_irq_params;
3249 struct dc_interrupt_params int_params = {0};
3252 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3253 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3255 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3256 &adev->dmub_outbox_irq);
3258 DRM_ERROR("Failed to add outbox irq id!\n");
3262 if (dc->ctx->dmub_srv) {
3263 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3264 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3265 int_params.irq_source =
3266 dc_interrupt_to_irq_source(dc, i, 0);
3268 c_irq_params = &adev->dm.dmub_outbox_params[0];
3270 c_irq_params->adev = adev;
3271 c_irq_params->irq_src = int_params.irq_source;
3273 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3274 dm_dmub_outbox1_low_irq, c_irq_params);
3282 * Acquires the lock for the atomic state object and returns
3283 * the new atomic state.
3285 * This should only be called during atomic check.
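 *
 * Illustrative call pattern from atomic check (a sketch; error handling is
 * up to the caller):
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;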
3287 static int dm_atomic_get_state(struct drm_atomic_state *state,
3288 struct dm_atomic_state **dm_state)
3290 struct drm_device *dev = state->dev;
3291 struct amdgpu_device *adev = drm_to_adev(dev);
3292 struct amdgpu_display_manager *dm = &adev->dm;
3293 struct drm_private_state *priv_state;
3298 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3299 if (IS_ERR(priv_state))
3300 return PTR_ERR(priv_state);
3302 *dm_state = to_dm_atomic_state(priv_state);
3307 static struct dm_atomic_state *
3308 dm_atomic_get_new_state(struct drm_atomic_state *state)
3310 struct drm_device *dev = state->dev;
3311 struct amdgpu_device *adev = drm_to_adev(dev);
3312 struct amdgpu_display_manager *dm = &adev->dm;
3313 struct drm_private_obj *obj;
3314 struct drm_private_state *new_obj_state;
3317 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3318 if (obj->funcs == dm->atomic_obj.funcs)
3319 return to_dm_atomic_state(new_obj_state);
3325 static struct drm_private_state *
3326 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3328 struct dm_atomic_state *old_state, *new_state;
3330 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3334 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3336 old_state = to_dm_atomic_state(obj->state);
3338 if (old_state && old_state->context)
3339 new_state->context = dc_copy_state(old_state->context);
3341 if (!new_state->context) {
3346 return &new_state->base;
3349 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3350 struct drm_private_state *state)
3352 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3354 if (dm_state && dm_state->context)
3355 dc_release_state(dm_state->context);
3360 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3361 .atomic_duplicate_state = dm_atomic_duplicate_state,
3362 .atomic_destroy_state = dm_atomic_destroy_state,
3365 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3367 struct dm_atomic_state *state;
3370 adev->mode_info.mode_config_initialized = true;
3372 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3373 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3375 adev_to_drm(adev)->mode_config.max_width = 16384;
3376 adev_to_drm(adev)->mode_config.max_height = 16384;
3378 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3379 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3380 /* indicates support for immediate flip */
3381 adev_to_drm(adev)->mode_config.async_page_flip = true;
3383 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3385 state = kzalloc(sizeof(*state), GFP_KERNEL);
3389 state->context = dc_create_state(adev->dm.dc);
3390 if (!state->context) {
3395 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3397 drm_atomic_private_obj_init(adev_to_drm(adev),
3398 &adev->dm.atomic_obj,
3400 &dm_atomic_state_funcs);
3402 r = amdgpu_display_modeset_create_props(adev);
3404 dc_release_state(state->context);
3409 r = amdgpu_dm_audio_init(adev);
3411 dc_release_state(state->context);
3419 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3420 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3421 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3423 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3424 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3426 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3428 #if defined(CONFIG_ACPI)
3429 struct amdgpu_dm_backlight_caps caps;
3431 memset(&caps, 0, sizeof(caps));
3433 if (dm->backlight_caps.caps_valid)
3436 amdgpu_acpi_get_backlight_caps(&caps);
3437 if (caps.caps_valid) {
3438 dm->backlight_caps.caps_valid = true;
3439 if (caps.aux_support)
3441 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3442 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3444 dm->backlight_caps.min_input_signal =
3445 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3446 dm->backlight_caps.max_input_signal =
3447 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3450 if (dm->backlight_caps.aux_support)
3453 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3454 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3458 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3459 unsigned *min, unsigned *max)
3464 if (caps->aux_support) {
3465 // Firmware limits are in nits, DC API wants millinits.
3466 *max = 1000 * caps->aux_max_input_signal;
3467 *min = 1000 * caps->aux_min_input_signal;
3469 // Firmware limits are 8-bit, PWM control is 16-bit.
3470 *max = 0x101 * caps->max_input_signal;
3471 *min = 0x101 * caps->min_input_signal;
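	/*
	 * Illustrative example: with the default limits of 12 and 255 (used
	 * when ACPI does not provide caps), the PWM range becomes
	 * 0x101 * 12 = 3084 .. 0x101 * 255 = 65535.
	 */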
3476 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3477 uint32_t brightness)
3481 if (!get_brightness_range(caps, &min, &max))
3484 // Rescale 0..255 to min..max
3485 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3486 AMDGPU_MAX_BL_LEVEL);
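	/*
	 * Illustrative example: with min = 3084 and max = 65535 as above, a
	 * user brightness of 128 maps to
	 * 3084 + round(62451 * 128 / 255) = 34433.
	 */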
3489 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3490 uint32_t brightness)
3494 if (!get_brightness_range(caps, &min, &max))
3497 if (brightness < min)
3499 // Rescale min..max to 0..255
3500 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3504 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3505 u32 user_brightness)
3507 struct amdgpu_dm_backlight_caps caps;
3508 struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3509 u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3513 amdgpu_dm_update_backlight_caps(dm);
3514 caps = dm->backlight_caps;
3516 for (i = 0; i < dm->num_of_edps; i++) {
3517 dm->brightness[i] = user_brightness;
3518 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3519 link[i] = (struct dc_link *)dm->backlight_link[i];
3522 /* Change brightness based on AUX property */
3523 if (caps.aux_support) {
3524 for (i = 0; i < dm->num_of_edps; i++) {
3525 rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3526 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3528 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3533 for (i = 0; i < dm->num_of_edps; i++) {
3534 rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3536 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3545 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3547 struct amdgpu_display_manager *dm = bl_get_data(bd);
3549 amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3554 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3556 struct amdgpu_dm_backlight_caps caps;
3558 amdgpu_dm_update_backlight_caps(dm);
3559 caps = dm->backlight_caps;
3561 if (caps.aux_support) {
3562 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3566 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3568 return dm->brightness[0];
3569 return convert_brightness_to_user(&caps, avg);
3571 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3573 if (ret == DC_ERROR_UNEXPECTED)
3574 return dm->brightness[0];
3575 return convert_brightness_to_user(&caps, ret);
3579 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3581 struct amdgpu_display_manager *dm = bl_get_data(bd);
3583 return amdgpu_dm_backlight_get_level(dm);
3586 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3587 .options = BL_CORE_SUSPENDRESUME,
3588 .get_brightness = amdgpu_dm_backlight_get_brightness,
3589 .update_status = amdgpu_dm_backlight_update_status,
3593 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3596 struct backlight_properties props = { 0 };
3599 amdgpu_dm_update_backlight_caps(dm);
3600 for (i = 0; i < dm->num_of_edps; i++)
3601 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3603 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3604 props.brightness = AMDGPU_MAX_BL_LEVEL;
3605 props.type = BACKLIGHT_RAW;
3607 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3608 adev_to_drm(dm->adev)->primary->index);
3610 dm->backlight_dev = backlight_device_register(bl_name,
3611 adev_to_drm(dm->adev)->dev,
3613 &amdgpu_dm_backlight_ops,
3616 if (IS_ERR(dm->backlight_dev))
3617 DRM_ERROR("DM: Backlight registration failed!\n");
3619 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3624 static int initialize_plane(struct amdgpu_display_manager *dm,
3625 struct amdgpu_mode_info *mode_info, int plane_id,
3626 enum drm_plane_type plane_type,
3627 const struct dc_plane_cap *plane_cap)
3629 struct drm_plane *plane;
3630 unsigned long possible_crtcs;
3633 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3635 DRM_ERROR("KMS: Failed to allocate plane\n");
3638 plane->type = plane_type;
3641 * HACK: IGT tests expect that the primary plane for a CRTC
3642 * can only have one possible CRTC. Only expose support for
3643 * any CRTC if they're not going to be used as a primary plane
3644 * for a CRTC - like overlay or underlay planes.
3646 possible_crtcs = 1 << plane_id;
3647 if (plane_id >= dm->dc->caps.max_streams)
3648 possible_crtcs = 0xff;
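	/*
	 * Illustrative example: with dc->caps.max_streams == 4, primary
	 * plane 2 gets possible_crtcs = 1 << 2 = 0x4 (CRTC 2 only), while an
	 * overlay plane with plane_id == 5 gets 0xff (any CRTC).
	 */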
3650 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3653 DRM_ERROR("KMS: Failed to initialize plane\n");
3659 mode_info->planes[plane_id] = plane;
3665 static void register_backlight_device(struct amdgpu_display_manager *dm,
3666 struct dc_link *link)
3668 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3669 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3671 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3672 link->type != dc_connection_none) {
3674 * Even if registration fails, we should continue with
3675 * DM initialization, because not having backlight control
3676 * is better than a black screen.
3678 if (!dm->backlight_dev)
3679 amdgpu_dm_register_backlight_device(dm);
3681 if (dm->backlight_dev) {
3682 dm->backlight_link[dm->num_of_edps] = link;
3691 * In this architecture, the association
3692 * connector -> encoder -> crtc
3693 * is not really required. The crtc and connector will hold the
3694 * display_index as an abstraction to use with the DAL component
3696 * Returns 0 on success
3698 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3700 struct amdgpu_display_manager *dm = &adev->dm;
3702 struct amdgpu_dm_connector *aconnector = NULL;
3703 struct amdgpu_encoder *aencoder = NULL;
3704 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3706 int32_t primary_planes;
3707 enum dc_connection_type new_connection_type = dc_connection_none;
3708 const struct dc_plane_cap *plane;
3710 dm->display_indexes_num = dm->dc->caps.max_streams;
3711 /* Update the actual number of CRTCs in use */
3712 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3714 link_cnt = dm->dc->caps.max_links;
3715 if (amdgpu_dm_mode_config_init(dm->adev)) {
3716 DRM_ERROR("DM: Failed to initialize mode config\n");
3720 /* There is one primary plane per CRTC */
3721 primary_planes = dm->dc->caps.max_streams;
3722 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3725 * Initialize primary planes, implicit planes for legacy IOCTLS.
3726 * Order is reversed to match iteration order in atomic check.
3728 for (i = (primary_planes - 1); i >= 0; i--) {
3729 plane = &dm->dc->caps.planes[i];
3731 if (initialize_plane(dm, mode_info, i,
3732 DRM_PLANE_TYPE_PRIMARY, plane)) {
3733 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3739 * Initialize overlay planes, index starting after primary planes.
3740 * These planes have a higher DRM index than the primary planes since
3741 * they should be considered as having a higher z-order.
3742 * Order is reversed to match iteration order in atomic check.
3744 * Only support DCN for now, and only expose one so we don't encourage
3745 * userspace to use up all the pipes.
3747 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3748 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3750 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3753 if (!plane->blends_with_above || !plane->blends_with_below)
3756 if (!plane->pixel_format_support.argb8888)
3759 if (initialize_plane(dm, NULL, primary_planes + i,
3760 DRM_PLANE_TYPE_OVERLAY, plane)) {
3761 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3765 /* Only create one overlay plane. */
3769 for (i = 0; i < dm->dc->caps.max_streams; i++)
3770 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3771 DRM_ERROR("KMS: Failed to initialize crtc\n");
3775 #if defined(CONFIG_DRM_AMD_DC_DCN)
3776 /* Use Outbox interrupt */
3777 switch (adev->asic_type) {
3778 case CHIP_SIENNA_CICHLID:
3779 case CHIP_NAVY_FLOUNDER:
3780 case CHIP_YELLOW_CARP:
3782 if (register_outbox_irq_handlers(dm->adev)) {
3783 DRM_ERROR("DM: Failed to initialize IRQ\n");
3788 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3792 /* loops over all connectors on the board */
3793 for (i = 0; i < link_cnt; i++) {
3794 struct dc_link *link = NULL;
3796 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3798 "KMS: Cannot support more than %d display indexes\n",
3799 AMDGPU_DM_MAX_DISPLAY_INDEX);
3803 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3807 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3811 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3812 DRM_ERROR("KMS: Failed to initialize encoder\n");
3816 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3817 DRM_ERROR("KMS: Failed to initialize connector\n");
3821 link = dc_get_link_at_index(dm->dc, i);
3823 if (!dc_link_detect_sink(link, &new_connection_type))
3824 DRM_ERROR("KMS: Failed to detect connector\n");
3826 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3827 emulated_link_detect(link);
3828 amdgpu_dm_update_connector_after_detect(aconnector);
3830 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3831 amdgpu_dm_update_connector_after_detect(aconnector);
3832 register_backlight_device(dm, link);
3833 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3834 amdgpu_dm_set_psr_caps(link);
3840 /* Software is initialized. Now we can register interrupt handlers. */
3841 switch (adev->asic_type) {
3842 #if defined(CONFIG_DRM_AMD_DC_SI)
3847 if (dce60_register_irq_handlers(dm->adev)) {
3848 DRM_ERROR("DM: Failed to initialize IRQ\n");
3862 case CHIP_POLARIS11:
3863 case CHIP_POLARIS10:
3864 case CHIP_POLARIS12:
3869 if (dce110_register_irq_handlers(dm->adev)) {
3870 DRM_ERROR("DM: Failed to initialize IRQ\n");
3874 #if defined(CONFIG_DRM_AMD_DC_DCN)
3880 case CHIP_SIENNA_CICHLID:
3881 case CHIP_NAVY_FLOUNDER:
3882 case CHIP_DIMGREY_CAVEFISH:
3883 case CHIP_BEIGE_GOBY:
3885 case CHIP_YELLOW_CARP:
3886 if (dcn10_register_irq_handlers(dm->adev)) {
3887 DRM_ERROR("DM: Failed to initialize IRQ\n");
3893 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3905 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3907 drm_atomic_private_obj_fini(&dm->atomic_obj);
3911 /******************************************************************************
3912 * amdgpu_display_funcs functions
3913 *****************************************************************************/
3916 * dm_bandwidth_update - program display watermarks
3918 * @adev: amdgpu_device pointer
3920 * Calculate and program the display watermarks and line buffer allocation.
3922 static void dm_bandwidth_update(struct amdgpu_device *adev)
3924 /* TODO: implement later */
3927 static const struct amdgpu_display_funcs dm_display_funcs = {
3928 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3929 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3930 .backlight_set_level = NULL, /* never called for DC */
3931 .backlight_get_level = NULL, /* never called for DC */
3932 .hpd_sense = NULL,/* called unconditionally */
3933 .hpd_set_polarity = NULL, /* called unconditionally */
3934 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3935 .page_flip_get_scanoutpos =
3936 dm_crtc_get_scanoutpos,/* called unconditionally */
3937 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3938 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3941 #if defined(CONFIG_DEBUG_KERNEL_DC)
3943 static ssize_t s3_debug_store(struct device *device,
3944 struct device_attribute *attr,
3950 struct drm_device *drm_dev = dev_get_drvdata(device);
3951 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3953 ret = kstrtoint(buf, 0, &s3_state);
3958 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3963 return ret == 0 ? count : 0;
3966 DEVICE_ATTR_WO(s3_debug);
3970 static int dm_early_init(void *handle)
3972 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3974 switch (adev->asic_type) {
3975 #if defined(CONFIG_DRM_AMD_DC_SI)
3979 adev->mode_info.num_crtc = 6;
3980 adev->mode_info.num_hpd = 6;
3981 adev->mode_info.num_dig = 6;
3984 adev->mode_info.num_crtc = 2;
3985 adev->mode_info.num_hpd = 2;
3986 adev->mode_info.num_dig = 2;
3991 adev->mode_info.num_crtc = 6;
3992 adev->mode_info.num_hpd = 6;
3993 adev->mode_info.num_dig = 6;
3996 adev->mode_info.num_crtc = 4;
3997 adev->mode_info.num_hpd = 6;
3998 adev->mode_info.num_dig = 7;
4002 adev->mode_info.num_crtc = 2;
4003 adev->mode_info.num_hpd = 6;
4004 adev->mode_info.num_dig = 6;
4008 adev->mode_info.num_crtc = 6;
4009 adev->mode_info.num_hpd = 6;
4010 adev->mode_info.num_dig = 7;
4013 adev->mode_info.num_crtc = 3;
4014 adev->mode_info.num_hpd = 6;
4015 adev->mode_info.num_dig = 9;
4018 adev->mode_info.num_crtc = 2;
4019 adev->mode_info.num_hpd = 6;
4020 adev->mode_info.num_dig = 9;
4022 case CHIP_POLARIS11:
4023 case CHIP_POLARIS12:
4024 adev->mode_info.num_crtc = 5;
4025 adev->mode_info.num_hpd = 5;
4026 adev->mode_info.num_dig = 5;
4028 case CHIP_POLARIS10:
4030 adev->mode_info.num_crtc = 6;
4031 adev->mode_info.num_hpd = 6;
4032 adev->mode_info.num_dig = 6;
4037 adev->mode_info.num_crtc = 6;
4038 adev->mode_info.num_hpd = 6;
4039 adev->mode_info.num_dig = 6;
4041 #if defined(CONFIG_DRM_AMD_DC_DCN)
4045 adev->mode_info.num_crtc = 4;
4046 adev->mode_info.num_hpd = 4;
4047 adev->mode_info.num_dig = 4;
4051 case CHIP_SIENNA_CICHLID:
4052 case CHIP_NAVY_FLOUNDER:
4053 adev->mode_info.num_crtc = 6;
4054 adev->mode_info.num_hpd = 6;
4055 adev->mode_info.num_dig = 6;
4057 case CHIP_YELLOW_CARP:
4058 adev->mode_info.num_crtc = 4;
4059 adev->mode_info.num_hpd = 4;
4060 adev->mode_info.num_dig = 4;
4063 case CHIP_DIMGREY_CAVEFISH:
4064 adev->mode_info.num_crtc = 5;
4065 adev->mode_info.num_hpd = 5;
4066 adev->mode_info.num_dig = 5;
4068 case CHIP_BEIGE_GOBY:
4069 adev->mode_info.num_crtc = 2;
4070 adev->mode_info.num_hpd = 2;
4071 adev->mode_info.num_dig = 2;
4075 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4079 amdgpu_dm_set_irq_funcs(adev);
4081 if (adev->mode_info.funcs == NULL)
4082 adev->mode_info.funcs = &dm_display_funcs;
4085 * Note: Do NOT change adev->audio_endpt_rreg and
4086 * adev->audio_endpt_wreg because they are initialised in
4087 * amdgpu_device_init()
4089 #if defined(CONFIG_DEBUG_KERNEL_DC)
4091 adev_to_drm(adev)->dev,
4092 &dev_attr_s3_debug);
4098 static bool modeset_required(struct drm_crtc_state *crtc_state,
4099 struct dc_stream_state *new_stream,
4100 struct dc_stream_state *old_stream)
4102 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4105 static bool modereset_required(struct drm_crtc_state *crtc_state)
4107 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4110 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4112 drm_encoder_cleanup(encoder);
4116 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4117 .destroy = amdgpu_dm_encoder_destroy,
4121 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4122 struct drm_framebuffer *fb,
4123 int *min_downscale, int *max_upscale)
4125 struct amdgpu_device *adev = drm_to_adev(dev);
4126 struct dc *dc = adev->dm.dc;
4127 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4128 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4130 switch (fb->format->format) {
4131 case DRM_FORMAT_P010:
4132 case DRM_FORMAT_NV12:
4133 case DRM_FORMAT_NV21:
4134 *max_upscale = plane_cap->max_upscale_factor.nv12;
4135 *min_downscale = plane_cap->max_downscale_factor.nv12;
4138 case DRM_FORMAT_XRGB16161616F:
4139 case DRM_FORMAT_ARGB16161616F:
4140 case DRM_FORMAT_XBGR16161616F:
4141 case DRM_FORMAT_ABGR16161616F:
4142 *max_upscale = plane_cap->max_upscale_factor.fp16;
4143 *min_downscale = plane_cap->max_downscale_factor.fp16;
4147 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4148 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4153 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4154 * scaling factor of 1.0 == 1000 units.
4156 if (*max_upscale == 1)
4157 *max_upscale = 1000;
4159 if (*min_downscale == 1)
4160 *min_downscale = 1000;
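	/*
	 * Illustrative example: an argb8888 max_downscale_factor of 250 means
	 * the destination may be as small as 0.25x the source (a 4x
	 * downscale), while a reported factor of 1 is normalized to 1000
	 * above, i.e. no scaling allowed in that direction.
	 */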
4164 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4165 struct dc_scaling_info *scaling_info)
4167 int scale_w, scale_h, min_downscale, max_upscale;
4169 memset(scaling_info, 0, sizeof(*scaling_info));
4171 /* Source is fixed 16.16 but we ignore mantissa for now... */
4172 scaling_info->src_rect.x = state->src_x >> 16;
4173 scaling_info->src_rect.y = state->src_y >> 16;
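	/*
	 * Illustrative example: state->src_x == (100 << 16) is 100.0 in 16.16
	 * fixed point and becomes src_rect.x = 100; the fractional bits are
	 * dropped by the shift.
	 */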
4176 * For reasons we don't (yet) fully understand, a non-zero
4177 * src_y coordinate into an NV12 buffer can cause a
4178 * system hang. To avoid hangs (and maybe be overly cautious)
4179 * let's reject both non-zero src_x and src_y.
4181 * We currently know of only one use-case to reproduce a
4182 * scenario with non-zero src_x and src_y for NV12, which
4183 * is to gesture the YouTube Android app into full screen
4187 state->fb->format->format == DRM_FORMAT_NV12 &&
4188 (scaling_info->src_rect.x != 0 ||
4189 scaling_info->src_rect.y != 0))
4192 scaling_info->src_rect.width = state->src_w >> 16;
4193 if (scaling_info->src_rect.width == 0)
4196 scaling_info->src_rect.height = state->src_h >> 16;
4197 if (scaling_info->src_rect.height == 0)
4200 scaling_info->dst_rect.x = state->crtc_x;
4201 scaling_info->dst_rect.y = state->crtc_y;
4203 if (state->crtc_w == 0)
4206 scaling_info->dst_rect.width = state->crtc_w;
4208 if (state->crtc_h == 0)
4211 scaling_info->dst_rect.height = state->crtc_h;
4213 /* DRM doesn't specify clipping on destination output. */
4214 scaling_info->clip_rect = scaling_info->dst_rect;
4216 /* Validate scaling per-format with DC plane caps */
4217 if (state->plane && state->plane->dev && state->fb) {
4218 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4219 &min_downscale, &max_upscale);
4221 min_downscale = 250;
4222 max_upscale = 16000;
4225 scale_w = scaling_info->dst_rect.width * 1000 /
4226 scaling_info->src_rect.width;
4228 if (scale_w < min_downscale || scale_w > max_upscale)
4231 scale_h = scaling_info->dst_rect.height * 1000 /
4232 scaling_info->src_rect.height;
4234 if (scale_h < min_downscale || scale_h > max_upscale)
4238 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4239 * assume reasonable defaults based on the format.
4246 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4247 uint64_t tiling_flags)
4249 /* Fill GFX8 params */
4250 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4251 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4253 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4254 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4255 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4256 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4257 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4259 /* XXX fix me for VI */
4260 tiling_info->gfx8.num_banks = num_banks;
4261 tiling_info->gfx8.array_mode =
4262 DC_ARRAY_2D_TILED_THIN1;
4263 tiling_info->gfx8.tile_split = tile_split;
4264 tiling_info->gfx8.bank_width = bankw;
4265 tiling_info->gfx8.bank_height = bankh;
4266 tiling_info->gfx8.tile_aspect = mtaspect;
4267 tiling_info->gfx8.tile_mode =
4268 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4269 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4270 == DC_ARRAY_1D_TILED_THIN1) {
4271 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4274 tiling_info->gfx8.pipe_config =
4275 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4279 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4280 union dc_tiling_info *tiling_info)
4282 tiling_info->gfx9.num_pipes =
4283 adev->gfx.config.gb_addr_config_fields.num_pipes;
4284 tiling_info->gfx9.num_banks =
4285 adev->gfx.config.gb_addr_config_fields.num_banks;
4286 tiling_info->gfx9.pipe_interleave =
4287 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4288 tiling_info->gfx9.num_shader_engines =
4289 adev->gfx.config.gb_addr_config_fields.num_se;
4290 tiling_info->gfx9.max_compressed_frags =
4291 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4292 tiling_info->gfx9.num_rb_per_se =
4293 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4294 tiling_info->gfx9.shaderEnable = 1;
4295 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4296 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4297 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4298 adev->asic_type == CHIP_BEIGE_GOBY ||
4299 adev->asic_type == CHIP_YELLOW_CARP ||
4300 adev->asic_type == CHIP_VANGOGH)
4301 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4305 validate_dcc(struct amdgpu_device *adev,
4306 const enum surface_pixel_format format,
4307 const enum dc_rotation_angle rotation,
4308 const union dc_tiling_info *tiling_info,
4309 const struct dc_plane_dcc_param *dcc,
4310 const struct dc_plane_address *address,
4311 const struct plane_size *plane_size)
4313 struct dc *dc = adev->dm.dc;
4314 struct dc_dcc_surface_param input;
4315 struct dc_surface_dcc_cap output;
4317 memset(&input, 0, sizeof(input));
4318 memset(&output, 0, sizeof(output));
4323 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4324 !dc->cap_funcs.get_dcc_compression_cap)
4327 input.format = format;
4328 input.surface_size.width = plane_size->surface_size.width;
4329 input.surface_size.height = plane_size->surface_size.height;
4330 input.swizzle_mode = tiling_info->gfx9.swizzle;
4332 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4333 input.scan = SCAN_DIRECTION_HORIZONTAL;
4334 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4335 input.scan = SCAN_DIRECTION_VERTICAL;
4337 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4340 if (!output.capable)
4343 if (dcc->independent_64b_blks == 0 &&
4344 output.grph.rgb.independent_64b_blks != 0)
4351 modifier_has_dcc(uint64_t modifier)
4353 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4357 modifier_gfx9_swizzle_mode(uint64_t modifier)
4359 if (modifier == DRM_FORMAT_MOD_LINEAR)
4362 return AMD_FMT_MOD_GET(TILE, modifier);
4365 static const struct drm_format_info *
4366 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4368 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4372 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4373 union dc_tiling_info *tiling_info,
4376 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4377 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4378 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4379 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4381 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4383 if (!IS_AMD_FMT_MOD(modifier))
4386 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4387 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4389 if (adev->family >= AMDGPU_FAMILY_NV) {
4390 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4392 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4394 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4398 enum dm_micro_swizzle {
4399 MICRO_SWIZZLE_Z = 0,
4400 MICRO_SWIZZLE_S = 1,
4401 MICRO_SWIZZLE_D = 2,
4405 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4409 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4410 const struct drm_format_info *info = drm_format_info(format);
4413 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4419 * We always have to allow these modifiers:
4420 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4421 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4423 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4424 modifier == DRM_FORMAT_MOD_INVALID) {
4428 /* Check that the modifier is on the list of the plane's supported modifiers. */
4429 for (i = 0; i < plane->modifier_count; i++) {
4430 if (modifier == plane->modifiers[i])
4433 if (i == plane->modifier_count)
4437 * For D swizzle the canonical modifier depends on the bpp, so check
4440 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4441 adev->family >= AMDGPU_FAMILY_NV) {
4442 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4446 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4450 if (modifier_has_dcc(modifier)) {
4451 /* Per radeonsi comments, 16/64 bpp are more complicated. */
4452 if (info->cpp[0] != 4)
4454 /* We support multi-planar formats, but not when combined with
4455 * additional DCC metadata planes. */
4456 if (info->num_planes > 1)
4464 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4469 if (*cap - *size < 1) {
4470 uint64_t new_cap = *cap * 2;
4471 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4479 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4485 (*mods)[*size] = mod;
4490 add_gfx9_modifiers(const struct amdgpu_device *adev,
4491 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4493 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4494 int pipe_xor_bits = min(8, pipes +
4495 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4496 int bank_xor_bits = min(8 - pipe_xor_bits,
4497 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4498 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4499 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
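/*
 * Illustrative arithmetic with hypothetical GFX9 config values (not taken
 * from a real ASIC table): num_pipes = 4, num_se = 1, num_banks = 8 and
 * num_rb_per_se = 2 give pipes = 2, pipe_xor_bits = min(8, 2 + 0) = 2,
 * bank_xor_bits = min(8 - 2, 3) = 3 and rb = 0 + 1 = 1. These log2 values
 * are what gets packed into the PIPE_XOR_BITS/BANK_XOR_BITS/RB/PIPE fields
 * of the modifiers added below.
 */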
4502 if (adev->family == AMDGPU_FAMILY_RV) {
4503 /* Raven2 and later */
4504 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4507 * No _D DCC swizzles yet because we only allow 32bpp, which
4508 * doesn't support _D on DCN
4511 if (has_constant_encode) {
4512 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4513 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4514 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4515 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4516 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4517 AMD_FMT_MOD_SET(DCC, 1) |
4518 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4519 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4520 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4523 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4524 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4525 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4526 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4527 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4528 AMD_FMT_MOD_SET(DCC, 1) |
4529 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4530 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4531 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4533 if (has_constant_encode) {
4534 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4535 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4536 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4537 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4538 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4539 AMD_FMT_MOD_SET(DCC, 1) |
4540 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4541 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4542 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4544 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4545 AMD_FMT_MOD_SET(RB, rb) |
4546 AMD_FMT_MOD_SET(PIPE, pipes));
4549 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4551 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4552 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4553 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4554 AMD_FMT_MOD_SET(DCC, 1) |
4555 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4556 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4557 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4558 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4559 AMD_FMT_MOD_SET(RB, rb) |
4560 AMD_FMT_MOD_SET(PIPE, pipes));
4564 * Only supported for 64bpp on Raven, will be filtered on format in
4565 * dm_plane_format_mod_supported.
4567 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4568 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4569 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4570 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4571 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4573 if (adev->family == AMDGPU_FAMILY_RV) {
4574 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4575 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4576 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4577 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4578 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4582 * Only supported for 64bpp on Raven, will be filtered on format in
4583 * dm_plane_format_mod_supported.
4585 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4586 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4587 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4589 if (adev->family == AMDGPU_FAMILY_RV) {
4590 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4591 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4592 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4597 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4598 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4600 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4602 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4603 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4604 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4605 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4606 AMD_FMT_MOD_SET(DCC, 1) |
4607 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4608 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4609 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4611 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4612 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4613 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4614 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4615 AMD_FMT_MOD_SET(DCC, 1) |
4616 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4617 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4618 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4619 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4621 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4622 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4623 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4624 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4626 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4627 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4628 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4629 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4632 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4633 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4634 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4635 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4637 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4638 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4639 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4643 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4644 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4646 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4647 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
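/*
 * Illustrative arithmetic, assuming a hypothetical gfx10.3 configuration
 * with num_pipes = 8 and num_pkrs = 4: pipe_xor_bits = ilog2(8) = 3 and
 * pkrs = ilog2(4) = 2, which feed the PIPE_XOR_BITS and PACKERS fields of
 * the modifiers added below.
 */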
4649 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4650 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4651 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4652 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4653 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4654 AMD_FMT_MOD_SET(DCC, 1) |
4655 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4656 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4657 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4658 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4660 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4661 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4662 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4663 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4664 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4665 AMD_FMT_MOD_SET(DCC, 1) |
4666 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4667 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4668 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4669 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4670 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4672 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4673 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4674 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4675 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4676 AMD_FMT_MOD_SET(PACKERS, pkrs));
4678 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4679 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4680 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4681 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4682 AMD_FMT_MOD_SET(PACKERS, pkrs));
4684 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4685 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4686 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4687 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4689 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4690 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4691 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4695 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4697 uint64_t size = 0, capacity = 128;
4700 /* We have not hooked up any pre-GFX9 modifiers. */
4701 if (adev->family < AMDGPU_FAMILY_AI)
4704 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4706 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4707 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4708 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4709 return *mods ? 0 : -ENOMEM;
4712 switch (adev->family) {
4713 case AMDGPU_FAMILY_AI:
4714 case AMDGPU_FAMILY_RV:
4715 add_gfx9_modifiers(adev, mods, &size, &capacity);
4717 case AMDGPU_FAMILY_NV:
4718 case AMDGPU_FAMILY_VGH:
4719 case AMDGPU_FAMILY_YC:
4720 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4721 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4723 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4727 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4729 /* INVALID marks the end of the list. */
4730 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4739 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4740 const struct amdgpu_framebuffer *afb,
4741 const enum surface_pixel_format format,
4742 const enum dc_rotation_angle rotation,
4743 const struct plane_size *plane_size,
4744 union dc_tiling_info *tiling_info,
4745 struct dc_plane_dcc_param *dcc,
4746 struct dc_plane_address *address,
4747 const bool force_disable_dcc)
4749 const uint64_t modifier = afb->base.modifier;
4752 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4753 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4755 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4756 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4759 dcc->meta_pitch = afb->base.pitches[1];
4760 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4762 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4763 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4766 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4774 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4775 const struct amdgpu_framebuffer *afb,
4776 const enum surface_pixel_format format,
4777 const enum dc_rotation_angle rotation,
4778 const uint64_t tiling_flags,
4779 union dc_tiling_info *tiling_info,
4780 struct plane_size *plane_size,
4781 struct dc_plane_dcc_param *dcc,
4782 struct dc_plane_address *address,
4784 bool force_disable_dcc)
4786 const struct drm_framebuffer *fb = &afb->base;
4789 memset(tiling_info, 0, sizeof(*tiling_info));
4790 memset(plane_size, 0, sizeof(*plane_size));
4791 memset(dcc, 0, sizeof(*dcc));
4792 memset(address, 0, sizeof(*address));
4794 address->tmz_surface = tmz_surface;
4796 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4797 uint64_t addr = afb->address + fb->offsets[0];
4799 plane_size->surface_size.x = 0;
4800 plane_size->surface_size.y = 0;
4801 plane_size->surface_size.width = fb->width;
4802 plane_size->surface_size.height = fb->height;
4803 plane_size->surface_pitch =
4804 fb->pitches[0] / fb->format->cpp[0];
4806 address->type = PLN_ADDR_TYPE_GRAPHICS;
4807 address->grph.addr.low_part = lower_32_bits(addr);
4808 address->grph.addr.high_part = upper_32_bits(addr);
4809 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4810 uint64_t luma_addr = afb->address + fb->offsets[0];
4811 uint64_t chroma_addr = afb->address + fb->offsets[1];
4813 plane_size->surface_size.x = 0;
4814 plane_size->surface_size.y = 0;
4815 plane_size->surface_size.width = fb->width;
4816 plane_size->surface_size.height = fb->height;
4817 plane_size->surface_pitch =
4818 fb->pitches[0] / fb->format->cpp[0];
4820 plane_size->chroma_size.x = 0;
4821 plane_size->chroma_size.y = 0;
4822 /* TODO: set these based on surface format */
4823 plane_size->chroma_size.width = fb->width / 2;
4824 plane_size->chroma_size.height = fb->height / 2;
4826 plane_size->chroma_pitch =
4827 fb->pitches[1] / fb->format->cpp[1];
4829 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4830 address->video_progressive.luma_addr.low_part =
4831 lower_32_bits(luma_addr);
4832 address->video_progressive.luma_addr.high_part =
4833 upper_32_bits(luma_addr);
4834 address->video_progressive.chroma_addr.low_part =
4835 lower_32_bits(chroma_addr);
4836 address->video_progressive.chroma_addr.high_part =
4837 upper_32_bits(chroma_addr);
4840 if (adev->family >= AMDGPU_FAMILY_AI) {
4841 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4842 rotation, plane_size,
4849 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4856 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4857 bool *per_pixel_alpha, bool *global_alpha,
4858 int *global_alpha_value)
4860 *per_pixel_alpha = false;
4861 *global_alpha = false;
4862 *global_alpha_value = 0xff;
4864 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4867 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4868 static const uint32_t alpha_formats[] = {
4869 DRM_FORMAT_ARGB8888,
4870 DRM_FORMAT_RGBA8888,
4871 DRM_FORMAT_ABGR8888,
4873 uint32_t format = plane_state->fb->format->format;
4876 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4877 if (format == alpha_formats[i]) {
4878 *per_pixel_alpha = true;
4884 if (plane_state->alpha < 0xffff) {
4885 *global_alpha = true;
4886 *global_alpha_value = plane_state->alpha >> 8;
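/*
 * Note on the conversion above: the DRM plane "alpha" property is 16 bit
 * (0xffff == fully opaque) while DC's global_alpha_value is 8 bit, hence
 * the >> 8. For example, an alpha of 0x8000 becomes 0x80, i.e. roughly
 * 50% opacity.
 */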
4891 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4892 const enum surface_pixel_format format,
4893 enum dc_color_space *color_space)
4897 *color_space = COLOR_SPACE_SRGB;
4899 /* DRM color properties only affect non-RGB formats. */
4900 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4903 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4905 switch (plane_state->color_encoding) {
4906 case DRM_COLOR_YCBCR_BT601:
4908 *color_space = COLOR_SPACE_YCBCR601;
4910 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4913 case DRM_COLOR_YCBCR_BT709:
4915 *color_space = COLOR_SPACE_YCBCR709;
4917 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4920 case DRM_COLOR_YCBCR_BT2020:
4922 *color_space = COLOR_SPACE_2020_YCBCR;
4935 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4936 const struct drm_plane_state *plane_state,
4937 const uint64_t tiling_flags,
4938 struct dc_plane_info *plane_info,
4939 struct dc_plane_address *address,
4941 bool force_disable_dcc)
4943 const struct drm_framebuffer *fb = plane_state->fb;
4944 const struct amdgpu_framebuffer *afb =
4945 to_amdgpu_framebuffer(plane_state->fb);
4948 memset(plane_info, 0, sizeof(*plane_info));
4950 switch (fb->format->format) {
4952 plane_info->format =
4953 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4955 case DRM_FORMAT_RGB565:
4956 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4958 case DRM_FORMAT_XRGB8888:
4959 case DRM_FORMAT_ARGB8888:
4960 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4962 case DRM_FORMAT_XRGB2101010:
4963 case DRM_FORMAT_ARGB2101010:
4964 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4966 case DRM_FORMAT_XBGR2101010:
4967 case DRM_FORMAT_ABGR2101010:
4968 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4970 case DRM_FORMAT_XBGR8888:
4971 case DRM_FORMAT_ABGR8888:
4972 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4974 case DRM_FORMAT_NV21:
4975 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4977 case DRM_FORMAT_NV12:
4978 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4980 case DRM_FORMAT_P010:
4981 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4983 case DRM_FORMAT_XRGB16161616F:
4984 case DRM_FORMAT_ARGB16161616F:
4985 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4987 case DRM_FORMAT_XBGR16161616F:
4988 case DRM_FORMAT_ABGR16161616F:
4989 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4991 case DRM_FORMAT_XRGB16161616:
4992 case DRM_FORMAT_ARGB16161616:
4993 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4995 case DRM_FORMAT_XBGR16161616:
4996 case DRM_FORMAT_ABGR16161616:
4997 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5001 "Unsupported screen format %p4cc\n",
5002 &fb->format->format);
5006 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5007 case DRM_MODE_ROTATE_0:
5008 plane_info->rotation = ROTATION_ANGLE_0;
5010 case DRM_MODE_ROTATE_90:
5011 plane_info->rotation = ROTATION_ANGLE_90;
5013 case DRM_MODE_ROTATE_180:
5014 plane_info->rotation = ROTATION_ANGLE_180;
5016 case DRM_MODE_ROTATE_270:
5017 plane_info->rotation = ROTATION_ANGLE_270;
5020 plane_info->rotation = ROTATION_ANGLE_0;
5024 plane_info->visible = true;
5025 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5027 plane_info->layer_index = 0;
5029 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5030 &plane_info->color_space);
5034 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5035 plane_info->rotation, tiling_flags,
5036 &plane_info->tiling_info,
5037 &plane_info->plane_size,
5038 &plane_info->dcc, address, tmz_surface,
5043 fill_blending_from_plane_state(
5044 plane_state, &plane_info->per_pixel_alpha,
5045 &plane_info->global_alpha, &plane_info->global_alpha_value);
5050 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5051 struct dc_plane_state *dc_plane_state,
5052 struct drm_plane_state *plane_state,
5053 struct drm_crtc_state *crtc_state)
5055 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5056 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5057 struct dc_scaling_info scaling_info;
5058 struct dc_plane_info plane_info;
5060 bool force_disable_dcc = false;
5062 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5066 dc_plane_state->src_rect = scaling_info.src_rect;
5067 dc_plane_state->dst_rect = scaling_info.dst_rect;
5068 dc_plane_state->clip_rect = scaling_info.clip_rect;
5069 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5071 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5072 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5075 &dc_plane_state->address,
5081 dc_plane_state->format = plane_info.format;
5082 dc_plane_state->color_space = plane_info.color_space;
5084 dc_plane_state->plane_size = plane_info.plane_size;
5085 dc_plane_state->rotation = plane_info.rotation;
5086 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5087 dc_plane_state->stereo_format = plane_info.stereo_format;
5088 dc_plane_state->tiling_info = plane_info.tiling_info;
5089 dc_plane_state->visible = plane_info.visible;
5090 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5091 dc_plane_state->global_alpha = plane_info.global_alpha;
5092 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5093 dc_plane_state->dcc = plane_info.dcc;
5094 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5095 dc_plane_state->flip_int_enabled = true;
5098 * Always set input transfer function, since plane state is refreshed
5101 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5108 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5109 const struct dm_connector_state *dm_state,
5110 struct dc_stream_state *stream)
5112 enum amdgpu_rmx_type rmx_type;
5114 struct rect src = { 0 }; /* viewport in composition space */
5115 struct rect dst = { 0 }; /* stream addressable area */
5117 /* no mode. nothing to be done */
5121 /* Full screen scaling by default */
5122 src.width = mode->hdisplay;
5123 src.height = mode->vdisplay;
5124 dst.width = stream->timing.h_addressable;
5125 dst.height = stream->timing.v_addressable;
5128 rmx_type = dm_state->scaling;
5129 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5130 if (src.width * dst.height <
5131 src.height * dst.width) {
5132 /* height needs less upscaling/more downscaling */
5133 dst.width = src.width *
5134 dst.height / src.height;
5136 /* width needs less upscaling/more downscaling */
5137 dst.height = src.height *
5138 dst.width / src.width;
5140 } else if (rmx_type == RMX_CENTER) {
5144 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5145 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5147 if (dm_state->underscan_enable) {
5148 dst.x += dm_state->underscan_hborder / 2;
5149 dst.y += dm_state->underscan_vborder / 2;
5150 dst.width -= dm_state->underscan_hborder;
5151 dst.height -= dm_state->underscan_vborder;
5158 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5159 dst.x, dst.y, dst.width, dst.height);
5163 static enum dc_color_depth
5164 convert_color_depth_from_display_info(const struct drm_connector *connector,
5165 bool is_y420, int requested_bpc)
5172 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5173 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5175 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5177 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5180 bpc = (uint8_t)connector->display_info.bpc;
5181 /* Assume 8 bpc by default if no bpc is specified. */
5182 bpc = bpc ? bpc : 8;
5185 if (requested_bpc > 0) {
5187 * Cap display bpc based on the user requested value.
5189 * The value for state->max_bpc may not be correctly updated
5190 * depending on when the connector gets added to the state
5191 * or if this was called outside of atomic check, so it
5192 * can't be used directly.
5194 bpc = min_t(u8, bpc, requested_bpc);
5196 /* Round down to the nearest even number. */
5197 bpc = bpc - (bpc & 1);
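/*
 * Illustrative example: a panel reporting 12 bpc with a requested_bpc of
 * 11 yields min(12, 11) = 11, which the masking above rounds down to 10
 * since only even values map to a dc_color_depth below.
 */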
5203 * Temporary workaround: DRM doesn't parse color depth for
5204 * EDID revisions before 1.4
5205 * TODO: Fix EDID parsing
5207 return COLOR_DEPTH_888;
5209 return COLOR_DEPTH_666;
5211 return COLOR_DEPTH_888;
5213 return COLOR_DEPTH_101010;
5215 return COLOR_DEPTH_121212;
5217 return COLOR_DEPTH_141414;
5219 return COLOR_DEPTH_161616;
5221 return COLOR_DEPTH_UNDEFINED;
5225 static enum dc_aspect_ratio
5226 get_aspect_ratio(const struct drm_display_mode *mode_in)
5228 /* 1-1 mapping, since both enums follow the HDMI spec. */
5229 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5232 static enum dc_color_space
5233 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5235 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5237 switch (dc_crtc_timing->pixel_encoding) {
5238 case PIXEL_ENCODING_YCBCR422:
5239 case PIXEL_ENCODING_YCBCR444:
5240 case PIXEL_ENCODING_YCBCR420:
5243 * 27030 kHz is the separation point between HDTV and SDTV;
5244 * according to the HDMI spec, we use YCbCr709 and YCbCr601
5247 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5248 if (dc_crtc_timing->flags.Y_ONLY)
5250 COLOR_SPACE_YCBCR709_LIMITED;
5252 color_space = COLOR_SPACE_YCBCR709;
5254 if (dc_crtc_timing->flags.Y_ONLY)
5256 COLOR_SPACE_YCBCR601_LIMITED;
5258 color_space = COLOR_SPACE_YCBCR601;
5263 case PIXEL_ENCODING_RGB:
5264 color_space = COLOR_SPACE_SRGB;
5275 static bool adjust_colour_depth_from_display_info(
5276 struct dc_crtc_timing *timing_out,
5277 const struct drm_display_info *info)
5279 enum dc_color_depth depth = timing_out->display_color_depth;
5282 normalized_clk = timing_out->pix_clk_100hz / 10;
5283 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5284 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5285 normalized_clk /= 2;
5286 /* Adjust the pixel clock per the HDMI spec based on the colour depth */
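/*
 * Illustrative example (made-up sink limit): a 4k@60 mode has
 * pix_clk_100hz = 5940000, i.e. normalized_clk = 594000 kHz. At
 * COLOR_DEPTH_101010 that becomes 594000 * 30 / 24 = 742500 kHz, which
 * would exceed a sink advertising max_tmds_clock = 600000 kHz, so the
 * loop retries at COLOR_DEPTH_888 where 594000 kHz fits.
 */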
5288 case COLOR_DEPTH_888:
5290 case COLOR_DEPTH_101010:
5291 normalized_clk = (normalized_clk * 30) / 24;
5293 case COLOR_DEPTH_121212:
5294 normalized_clk = (normalized_clk * 36) / 24;
5296 case COLOR_DEPTH_161616:
5297 normalized_clk = (normalized_clk * 48) / 24;
5300 /* The above depths are the only ones valid for HDMI. */
5303 if (normalized_clk <= info->max_tmds_clock) {
5304 timing_out->display_color_depth = depth;
5307 } while (--depth > COLOR_DEPTH_666);
5311 static void fill_stream_properties_from_drm_display_mode(
5312 struct dc_stream_state *stream,
5313 const struct drm_display_mode *mode_in,
5314 const struct drm_connector *connector,
5315 const struct drm_connector_state *connector_state,
5316 const struct dc_stream_state *old_stream,
5319 struct dc_crtc_timing *timing_out = &stream->timing;
5320 const struct drm_display_info *info = &connector->display_info;
5321 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5322 struct hdmi_vendor_infoframe hv_frame;
5323 struct hdmi_avi_infoframe avi_frame;
5325 memset(&hv_frame, 0, sizeof(hv_frame));
5326 memset(&avi_frame, 0, sizeof(avi_frame));
5328 timing_out->h_border_left = 0;
5329 timing_out->h_border_right = 0;
5330 timing_out->v_border_top = 0;
5331 timing_out->v_border_bottom = 0;
5332 /* TODO: un-hardcode */
5333 if (drm_mode_is_420_only(info, mode_in)
5334 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5335 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5336 else if (drm_mode_is_420_also(info, mode_in)
5337 && aconnector->force_yuv420_output)
5338 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5339 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5340 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5341 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5343 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5345 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5346 timing_out->display_color_depth = convert_color_depth_from_display_info(
5348 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5350 timing_out->scan_type = SCANNING_TYPE_NODATA;
5351 timing_out->hdmi_vic = 0;
5354 timing_out->vic = old_stream->timing.vic;
5355 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5356 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5358 timing_out->vic = drm_match_cea_mode(mode_in);
5359 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5360 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5361 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5362 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5365 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5366 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5367 timing_out->vic = avi_frame.video_code;
5368 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5369 timing_out->hdmi_vic = hv_frame.vic;
5372 if (is_freesync_video_mode(mode_in, aconnector)) {
5373 timing_out->h_addressable = mode_in->hdisplay;
5374 timing_out->h_total = mode_in->htotal;
5375 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5376 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5377 timing_out->v_total = mode_in->vtotal;
5378 timing_out->v_addressable = mode_in->vdisplay;
5379 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5380 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5381 timing_out->pix_clk_100hz = mode_in->clock * 10;
5383 timing_out->h_addressable = mode_in->crtc_hdisplay;
5384 timing_out->h_total = mode_in->crtc_htotal;
5385 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5386 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5387 timing_out->v_total = mode_in->crtc_vtotal;
5388 timing_out->v_addressable = mode_in->crtc_vdisplay;
5389 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5390 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5391 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5394 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5396 stream->output_color_space = get_output_color_space(timing_out);
5398 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5399 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5400 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5401 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5402 drm_mode_is_420_also(info, mode_in) &&
5403 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5404 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5405 adjust_colour_depth_from_display_info(timing_out, info);
5410 static void fill_audio_info(struct audio_info *audio_info,
5411 const struct drm_connector *drm_connector,
5412 const struct dc_sink *dc_sink)
5415 int cea_revision = 0;
5416 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5418 audio_info->manufacture_id = edid_caps->manufacturer_id;
5419 audio_info->product_id = edid_caps->product_id;
5421 cea_revision = drm_connector->display_info.cea_rev;
5423 strscpy(audio_info->display_name,
5424 edid_caps->display_name,
5425 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5427 if (cea_revision >= 3) {
5428 audio_info->mode_count = edid_caps->audio_mode_count;
5430 for (i = 0; i < audio_info->mode_count; ++i) {
5431 audio_info->modes[i].format_code =
5432 (enum audio_format_code)
5433 (edid_caps->audio_modes[i].format_code);
5434 audio_info->modes[i].channel_count =
5435 edid_caps->audio_modes[i].channel_count;
5436 audio_info->modes[i].sample_rates.all =
5437 edid_caps->audio_modes[i].sample_rate;
5438 audio_info->modes[i].sample_size =
5439 edid_caps->audio_modes[i].sample_size;
5443 audio_info->flags.all = edid_caps->speaker_flags;
5445 /* TODO: We only check for the progressive mode; check for interlaced mode too */
5446 if (drm_connector->latency_present[0]) {
5447 audio_info->video_latency = drm_connector->video_latency[0];
5448 audio_info->audio_latency = drm_connector->audio_latency[0];
5451 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5456 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5457 struct drm_display_mode *dst_mode)
5459 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5460 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5461 dst_mode->crtc_clock = src_mode->crtc_clock;
5462 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5463 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5464 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5465 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5466 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5467 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5468 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5469 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5470 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5471 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5472 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5476 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5477 const struct drm_display_mode *native_mode,
5480 if (scale_enabled) {
5481 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5482 } else if (native_mode->clock == drm_mode->clock &&
5483 native_mode->htotal == drm_mode->htotal &&
5484 native_mode->vtotal == drm_mode->vtotal) {
5485 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5487 /* not scaled and not an amdgpu-inserted mode, no need to patch */
5491 static struct dc_sink *
5492 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5494 struct dc_sink_init_data sink_init_data = { 0 };
5495 struct dc_sink *sink = NULL;
5496 sink_init_data.link = aconnector->dc_link;
5497 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5499 sink = dc_sink_create(&sink_init_data);
5501 DRM_ERROR("Failed to create sink!\n");
5504 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5509 static void set_multisync_trigger_params(
5510 struct dc_stream_state *stream)
5512 struct dc_stream_state *master = NULL;
5514 if (stream->triggered_crtc_reset.enabled) {
5515 master = stream->triggered_crtc_reset.event_source;
5516 stream->triggered_crtc_reset.event =
5517 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5518 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5519 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5523 static void set_master_stream(struct dc_stream_state *stream_set[],
5526 int j, highest_rfr = 0, master_stream = 0;
5528 for (j = 0; j < stream_count; j++) {
5529 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5530 int refresh_rate = 0;
5532 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5533 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
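/*
 * Illustrative example: a 1080p CEA timing with pix_clk_100hz = 1485000,
 * h_total = 2200 and v_total = 1125 gives (1485000 * 100) / (2200 * 1125)
 * = 60, i.e. a 60 Hz stream; the stream with the highest such rate below
 * becomes the multisync master.
 */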
5534 if (refresh_rate > highest_rfr) {
5535 highest_rfr = refresh_rate;
5540 for (j = 0; j < stream_count; j++) {
5542 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5546 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5549 struct dc_stream_state *stream;
5551 if (context->stream_count < 2)
5553 for (i = 0; i < context->stream_count ; i++) {
5554 if (!context->streams[i])
5557 * TODO: add a function to read AMD VSDB bits and set
5558 * crtc_sync_master.multi_sync_enabled flag
5559 * For now it's set to false
5563 set_master_stream(context->streams, context->stream_count);
5565 for (i = 0; i < context->stream_count ; i++) {
5566 stream = context->streams[i];
5571 set_multisync_trigger_params(stream);
5575 #if defined(CONFIG_DRM_AMD_DC_DCN)
5576 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5577 struct dc_sink *sink, struct dc_stream_state *stream,
5578 struct dsc_dec_dpcd_caps *dsc_caps)
5580 stream->timing.flags.DSC = 0;
5582 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5583 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5584 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5585 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5590 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5591 struct dc_sink *sink, struct dc_stream_state *stream,
5592 struct dsc_dec_dpcd_caps *dsc_caps)
5594 struct drm_connector *drm_connector = &aconnector->base;
5595 uint32_t link_bandwidth_kbps;
5597 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5598 dc_link_get_link_cap(aconnector->dc_link));
5599 /* Set DSC policy according to dsc_clock_en */
5600 dc_dsc_policy_set_enable_dsc_when_not_needed(
5601 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5603 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5605 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5607 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5609 link_bandwidth_kbps,
5611 &stream->timing.dsc_cfg)) {
5612 stream->timing.flags.DSC = 1;
5613 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5617 /* Overwrite the stream flag if DSC is enabled through debugfs */
5618 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5619 stream->timing.flags.DSC = 1;
5621 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5622 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5624 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5625 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5627 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5628 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5633 * DOC: FreeSync Video
5635 * When a userspace application wants to play a video, the content follows a
5636 * standard format definition that usually specifies the FPS for that format.
5637 * The below list illustrates some video formats and the expected FPS,
5640 * - TV/NTSC (23.976 FPS)
5643 * - TV/NTSC (29.97 FPS)
5644 * - TV/NTSC (30 FPS)
5645 * - Cinema HFR (48 FPS)
5647 * - Commonly used (60 FPS)
5648 * - Multiples of 24 (48,72,96 FPS)
5650 * The list of standard video formats is not huge and can be added to the
5651 * connector modeset list beforehand. With that, userspace can leverage
5652 * FreeSync to extend the front porch in order to attain the target refresh
5653 * rate. Such a switch will happen seamlessly, without screen blanking or
5654 * reprogramming of the output in any other way. If the userspace requests a
5655 * modesetting change compatible with FreeSync modes that only differ in the
5656 * refresh rate, DC will skip the full update and avoid blink during the
5657 * transition. For example, the video player can change the modesetting from
5658 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5659 * causing any display blink. This same concept can be applied to a mode
5662 static struct drm_display_mode *
5663 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5664 bool use_probed_modes)
5666 struct drm_display_mode *m, *m_pref = NULL;
5667 u16 current_refresh, highest_refresh;
5668 struct list_head *list_head = use_probed_modes ?
5669 &aconnector->base.probed_modes :
5670 &aconnector->base.modes;
5672 if (aconnector->freesync_vid_base.clock != 0)
5673 return &aconnector->freesync_vid_base;
5675 /* Find the preferred mode */
5676 list_for_each_entry(m, list_head, head) {
5677 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5684 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5685 m_pref = list_first_entry_or_null(
5686 &aconnector->base.modes, struct drm_display_mode, head);
5688 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5693 highest_refresh = drm_mode_vrefresh(m_pref);
5696 * Find the mode with the highest refresh rate at the same resolution.
5697 * For some monitors, the preferred mode is not the mode with the highest
5698 * supported refresh rate.
5700 list_for_each_entry(m, list_head, head) {
5701 current_refresh = drm_mode_vrefresh(m);
5703 if (m->hdisplay == m_pref->hdisplay &&
5704 m->vdisplay == m_pref->vdisplay &&
5705 highest_refresh < current_refresh) {
5706 highest_refresh = current_refresh;
5711 aconnector->freesync_vid_base = *m_pref;
5715 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5716 struct amdgpu_dm_connector *aconnector)
5718 struct drm_display_mode *high_mode;
5721 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5722 if (!high_mode || !mode)
5725 timing_diff = high_mode->vtotal - mode->vtotal;
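/*
 * Illustrative numbers: for a 60 Hz base mode with vtotal = 1125, a 48 Hz
 * FreeSync video variant keeps the same pixel clock and horizontal timing
 * and only grows the vertical front porch, so its vtotal is roughly
 * 1125 * 60 / 48 = 1406 and timing_diff = 1125 - 1406 = -281. The checks
 * below require every other field to match and the vsync position to
 * shift by exactly that same amount.
 */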
5727 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5728 high_mode->hdisplay != mode->hdisplay ||
5729 high_mode->vdisplay != mode->vdisplay ||
5730 high_mode->hsync_start != mode->hsync_start ||
5731 high_mode->hsync_end != mode->hsync_end ||
5732 high_mode->htotal != mode->htotal ||
5733 high_mode->hskew != mode->hskew ||
5734 high_mode->vscan != mode->vscan ||
5735 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5736 high_mode->vsync_end - mode->vsync_end != timing_diff)
5742 static struct dc_stream_state *
5743 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5744 const struct drm_display_mode *drm_mode,
5745 const struct dm_connector_state *dm_state,
5746 const struct dc_stream_state *old_stream,
5749 struct drm_display_mode *preferred_mode = NULL;
5750 struct drm_connector *drm_connector;
5751 const struct drm_connector_state *con_state =
5752 dm_state ? &dm_state->base : NULL;
5753 struct dc_stream_state *stream = NULL;
5754 struct drm_display_mode mode = *drm_mode;
5755 struct drm_display_mode saved_mode;
5756 struct drm_display_mode *freesync_mode = NULL;
5757 bool native_mode_found = false;
5758 bool recalculate_timing = false;
5759 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5761 int preferred_refresh = 0;
5762 #if defined(CONFIG_DRM_AMD_DC_DCN)
5763 struct dsc_dec_dpcd_caps dsc_caps;
5765 struct dc_sink *sink = NULL;
5767 memset(&saved_mode, 0, sizeof(saved_mode));
5769 if (aconnector == NULL) {
5770 DRM_ERROR("aconnector is NULL!\n");
5774 drm_connector = &aconnector->base;
5776 if (!aconnector->dc_sink) {
5777 sink = create_fake_sink(aconnector);
5781 sink = aconnector->dc_sink;
5782 dc_sink_retain(sink);
5785 stream = dc_create_stream_for_sink(sink);
5787 if (stream == NULL) {
5788 DRM_ERROR("Failed to create stream for sink!\n");
5792 stream->dm_stream_context = aconnector;
5794 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5795 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5797 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5798 /* Search for preferred mode */
5799 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5800 native_mode_found = true;
5804 if (!native_mode_found)
5805 preferred_mode = list_first_entry_or_null(
5806 &aconnector->base.modes,
5807 struct drm_display_mode,
5810 mode_refresh = drm_mode_vrefresh(&mode);
5812 if (preferred_mode == NULL) {
5814 * This may not be an error; the use case is when we have no
5815 * usermode calls to reset and set mode upon hotplug. In this
5816 * case, we call set mode ourselves to restore the previous mode
5817 * and the modelist may not be filled in in time.
5819 DRM_DEBUG_DRIVER("No preferred mode found\n");
5821 recalculate_timing = amdgpu_freesync_vid_mode &&
5822 is_freesync_video_mode(&mode, aconnector);
5823 if (recalculate_timing) {
5824 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5826 mode = *freesync_mode;
5828 decide_crtc_timing_for_drm_display_mode(
5829 &mode, preferred_mode, scale);
5831 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5835 if (recalculate_timing)
5836 drm_mode_set_crtcinfo(&saved_mode, 0);
5838 drm_mode_set_crtcinfo(&mode, 0);
5841 * If scaling is enabled and the refresh rate didn't change,
5842 * we copy the vic and polarities of the old timings
5844 if (!scale || mode_refresh != preferred_refresh)
5845 fill_stream_properties_from_drm_display_mode(
5846 stream, &mode, &aconnector->base, con_state, NULL,
5849 fill_stream_properties_from_drm_display_mode(
5850 stream, &mode, &aconnector->base, con_state, old_stream,
5853 #if defined(CONFIG_DRM_AMD_DC_DCN)
5854 /* SST DSC determination policy */
5855 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5856 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5857 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5860 update_stream_scaling_settings(&mode, dm_state, stream);
5863 &stream->audio_info,
5867 update_stream_signal(stream, sink);
5869 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5870 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5872 if (stream->link->psr_settings.psr_feature_enabled) {
5874 // Decide whether the stream supports VSC SDP colorimetry
5875 // before building the VSC info packet
5877 stream->use_vsc_sdp_for_colorimetry = false;
5878 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5879 stream->use_vsc_sdp_for_colorimetry =
5880 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5882 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5883 stream->use_vsc_sdp_for_colorimetry = true;
5885 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5886 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5890 dc_sink_release(sink);
5895 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5897 drm_crtc_cleanup(crtc);
5901 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5902 struct drm_crtc_state *state)
5904 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5906 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5908 dc_stream_release(cur->stream);
5911 __drm_atomic_helper_crtc_destroy_state(state);
5917 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5919 struct dm_crtc_state *state;
5922 dm_crtc_destroy_state(crtc, crtc->state);
5924 state = kzalloc(sizeof(*state), GFP_KERNEL);
5925 if (WARN_ON(!state))
5928 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5931 static struct drm_crtc_state *
5932 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5934 struct dm_crtc_state *state, *cur;
5936 cur = to_dm_crtc_state(crtc->state);
5938 if (WARN_ON(!crtc->state))
5941 state = kzalloc(sizeof(*state), GFP_KERNEL);
5945 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5948 state->stream = cur->stream;
5949 dc_stream_retain(state->stream);
5952 state->active_planes = cur->active_planes;
5953 state->vrr_infopacket = cur->vrr_infopacket;
5954 state->abm_level = cur->abm_level;
5955 state->vrr_supported = cur->vrr_supported;
5956 state->freesync_config = cur->freesync_config;
5957 state->cm_has_degamma = cur->cm_has_degamma;
5958 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5959 /* TODO: Duplicate dc_stream after the stream object is flattened */
5961 return &state->base;
5964 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5965 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5967 crtc_debugfs_init(crtc);
5973 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5975 enum dc_irq_source irq_source;
5976 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5977 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5980 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5982 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5984 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5985 acrtc->crtc_id, enable ? "en" : "dis", rc);
5989 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5991 enum dc_irq_source irq_source;
5992 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5993 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5994 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5995 #if defined(CONFIG_DRM_AMD_DC_DCN)
5996 struct amdgpu_display_manager *dm = &adev->dm;
5997 unsigned long flags;
6002 /* vblank irq on -> Only need vupdate irq in vrr mode */
6003 if (amdgpu_dm_vrr_active(acrtc_state))
6004 rc = dm_set_vupdate_irq(crtc, true);
6006 /* vblank irq off -> vupdate irq off */
6007 rc = dm_set_vupdate_irq(crtc, false);
6013 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6015 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6018 if (amdgpu_in_reset(adev))
6021 #if defined(CONFIG_DRM_AMD_DC_DCN)
6022 spin_lock_irqsave(&dm->vblank_lock, flags);
6023 dm->vblank_workqueue->dm = dm;
6024 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
6025 dm->vblank_workqueue->enable = enable;
6026 spin_unlock_irqrestore(&dm->vblank_lock, flags);
6027 schedule_work(&dm->vblank_workqueue->mall_work);
6033 static int dm_enable_vblank(struct drm_crtc *crtc)
6035 return dm_set_vblank(crtc, true);
6038 static void dm_disable_vblank(struct drm_crtc *crtc)
6040 dm_set_vblank(crtc, false);
6043 /* Implement only the options currently available for the driver */
6044 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6045 .reset = dm_crtc_reset_state,
6046 .destroy = amdgpu_dm_crtc_destroy,
6047 .set_config = drm_atomic_helper_set_config,
6048 .page_flip = drm_atomic_helper_page_flip,
6049 .atomic_duplicate_state = dm_crtc_duplicate_state,
6050 .atomic_destroy_state = dm_crtc_destroy_state,
6051 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6052 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6053 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6054 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6055 .enable_vblank = dm_enable_vblank,
6056 .disable_vblank = dm_disable_vblank,
6057 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6058 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6059 .late_register = amdgpu_dm_crtc_late_register,
6063 static enum drm_connector_status
6064 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6067 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6071 * 1. This interface is NOT called in the context of an HPD irq.
6072 * 2. This interface *is called* in the context of a user-mode ioctl, which
6073 * makes it a bad place for *any* MST-related activity.
6076 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6077 !aconnector->fake_enable)
6078 connected = (aconnector->dc_sink != NULL);
6080 connected = (aconnector->base.force == DRM_FORCE_ON);
6082 update_subconnector_property(aconnector);
6084 return (connected ? connector_status_connected :
6085 connector_status_disconnected);
6088 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6089 struct drm_connector_state *connector_state,
6090 struct drm_property *property,
6093 struct drm_device *dev = connector->dev;
6094 struct amdgpu_device *adev = drm_to_adev(dev);
6095 struct dm_connector_state *dm_old_state =
6096 to_dm_connector_state(connector->state);
6097 struct dm_connector_state *dm_new_state =
6098 to_dm_connector_state(connector_state);
6102 if (property == dev->mode_config.scaling_mode_property) {
6103 enum amdgpu_rmx_type rmx_type;
6106 case DRM_MODE_SCALE_CENTER:
6107 rmx_type = RMX_CENTER;
6109 case DRM_MODE_SCALE_ASPECT:
6110 rmx_type = RMX_ASPECT;
6112 case DRM_MODE_SCALE_FULLSCREEN:
6113 rmx_type = RMX_FULL;
6115 case DRM_MODE_SCALE_NONE:
6121 if (dm_old_state->scaling == rmx_type)
6124 dm_new_state->scaling = rmx_type;
6126 } else if (property == adev->mode_info.underscan_hborder_property) {
6127 dm_new_state->underscan_hborder = val;
6129 } else if (property == adev->mode_info.underscan_vborder_property) {
6130 dm_new_state->underscan_vborder = val;
6132 } else if (property == adev->mode_info.underscan_property) {
6133 dm_new_state->underscan_enable = val;
6135 } else if (property == adev->mode_info.abm_level_property) {
6136 dm_new_state->abm_level = val;
6143 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6144 const struct drm_connector_state *state,
6145 struct drm_property *property,
6148 struct drm_device *dev = connector->dev;
6149 struct amdgpu_device *adev = drm_to_adev(dev);
6150 struct dm_connector_state *dm_state =
6151 to_dm_connector_state(state);
6154 if (property == dev->mode_config.scaling_mode_property) {
6155 switch (dm_state->scaling) {
6157 *val = DRM_MODE_SCALE_CENTER;
6160 *val = DRM_MODE_SCALE_ASPECT;
6163 *val = DRM_MODE_SCALE_FULLSCREEN;
6167 *val = DRM_MODE_SCALE_NONE;
6171 } else if (property == adev->mode_info.underscan_hborder_property) {
6172 *val = dm_state->underscan_hborder;
6174 } else if (property == adev->mode_info.underscan_vborder_property) {
6175 *val = dm_state->underscan_vborder;
6177 } else if (property == adev->mode_info.underscan_property) {
6178 *val = dm_state->underscan_enable;
6180 } else if (property == adev->mode_info.abm_level_property) {
6181 *val = dm_state->abm_level;
6188 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6190 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6192 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6195 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6197 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6198 const struct dc_link *link = aconnector->dc_link;
6199 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6200 struct amdgpu_display_manager *dm = &adev->dm;
6203 * Call only if mst_mgr was initialized before, since it's not done
6204 * for all connector types.
6206 if (aconnector->mst_mgr.dev)
6207 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6209 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6210 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6212 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6213 link->type != dc_connection_none &&
6214 dm->backlight_dev) {
6215 backlight_device_unregister(dm->backlight_dev);
6216 dm->backlight_dev = NULL;
6220 if (aconnector->dc_em_sink)
6221 dc_sink_release(aconnector->dc_em_sink);
6222 aconnector->dc_em_sink = NULL;
6223 if (aconnector->dc_sink)
6224 dc_sink_release(aconnector->dc_sink);
6225 aconnector->dc_sink = NULL;
6227 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6228 drm_connector_unregister(connector);
6229 drm_connector_cleanup(connector);
6230 if (aconnector->i2c) {
6231 i2c_del_adapter(&aconnector->i2c->base);
6232 kfree(aconnector->i2c);
6234 kfree(aconnector->dm_dp_aux.aux.name);
6239 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6241 struct dm_connector_state *state =
6242 to_dm_connector_state(connector->state);
6244 if (connector->state)
6245 __drm_atomic_helper_connector_destroy_state(connector->state);
6249 state = kzalloc(sizeof(*state), GFP_KERNEL);
6252 state->scaling = RMX_OFF;
6253 state->underscan_enable = false;
6254 state->underscan_hborder = 0;
6255 state->underscan_vborder = 0;
6256 state->base.max_requested_bpc = 8;
6257 state->vcpi_slots = 0;
6259 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6260 state->abm_level = amdgpu_dm_abm_level;
6262 __drm_atomic_helper_connector_reset(connector, &state->base);
6266 struct drm_connector_state *
6267 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6269 struct dm_connector_state *state =
6270 to_dm_connector_state(connector->state);
6272 struct dm_connector_state *new_state =
6273 kmemdup(state, sizeof(*state), GFP_KERNEL);
6278 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6280 new_state->freesync_capable = state->freesync_capable;
6281 new_state->abm_level = state->abm_level;
6282 new_state->scaling = state->scaling;
6283 new_state->underscan_enable = state->underscan_enable;
6284 new_state->underscan_hborder = state->underscan_hborder;
6285 new_state->underscan_vborder = state->underscan_vborder;
6286 new_state->vcpi_slots = state->vcpi_slots;
6287 new_state->pbn = state->pbn;
6288 return &new_state->base;
6292 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6294 struct amdgpu_dm_connector *amdgpu_dm_connector =
6295 to_amdgpu_dm_connector(connector);
6298 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6299 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6300 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6301 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6306 #if defined(CONFIG_DEBUG_FS)
6307 connector_debugfs_init(amdgpu_dm_connector);
6313 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6314 .reset = amdgpu_dm_connector_funcs_reset,
6315 .detect = amdgpu_dm_connector_detect,
6316 .fill_modes = drm_helper_probe_single_connector_modes,
6317 .destroy = amdgpu_dm_connector_destroy,
6318 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6319 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6320 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6321 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6322 .late_register = amdgpu_dm_connector_late_register,
6323 .early_unregister = amdgpu_dm_connector_unregister
6326 static int get_modes(struct drm_connector *connector)
6328 return amdgpu_dm_connector_get_modes(connector);
6331 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6333 struct dc_sink_init_data init_params = {
6334 .link = aconnector->dc_link,
6335 .sink_signal = SIGNAL_TYPE_VIRTUAL
6339 if (!aconnector->base.edid_blob_ptr) {
6340 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6341 aconnector->base.name);
6343 aconnector->base.force = DRM_FORCE_OFF;
6344 aconnector->base.override_edid = false;
6348 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6350 aconnector->edid = edid;
6352 aconnector->dc_em_sink = dc_link_add_remote_sink(
6353 aconnector->dc_link,
6355 (edid->extensions + 1) * EDID_LENGTH,
6358 if (aconnector->base.force == DRM_FORCE_ON) {
6359 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6360 aconnector->dc_link->local_sink :
6361 aconnector->dc_em_sink;
6362 dc_sink_retain(aconnector->dc_sink);
6366 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6368 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6371 * In case of headless boot with force on for a DP managed connector,
6372 * those settings have to be != 0 to get the initial modeset.
6374 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6375 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6376 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6380 aconnector->base.override_edid = true;
6381 create_eml_sink(aconnector);
6384 static struct dc_stream_state *
6385 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6386 const struct drm_display_mode *drm_mode,
6387 const struct dm_connector_state *dm_state,
6388 const struct dc_stream_state *old_stream)
6390 struct drm_connector *connector = &aconnector->base;
6391 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6392 struct dc_stream_state *stream;
6393 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6394 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6395 enum dc_status dc_result = DC_OK;
6398 stream = create_stream_for_sink(aconnector, drm_mode,
6399 dm_state, old_stream,
6401 if (stream == NULL) {
6402 DRM_ERROR("Failed to create stream for sink!\n");
6406 dc_result = dc_validate_stream(adev->dm.dc, stream);
6408 if (dc_result != DC_OK) {
6409 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6414 dc_status_to_str(dc_result));
6416 dc_stream_release(stream);
6418 requested_bpc -= 2; /* lower bpc to retry validation */
6421 } while (stream == NULL && requested_bpc >= 6);
6423 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6424 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6426 aconnector->force_yuv420_output = true;
6427 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6428 dm_state, old_stream);
6429 aconnector->force_yuv420_output = false;
6435 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6436 struct drm_display_mode *mode)
6438 int result = MODE_ERROR;
6439 struct dc_sink *dc_sink;
6440 /* TODO: Unhardcode stream count */
6441 struct dc_stream_state *stream;
6442 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6444 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6445 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6449 * Only run this the first time mode_valid is called to initialize
6452 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6453 !aconnector->dc_em_sink)
6454 handle_edid_mgmt(aconnector);
6456 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6458 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6459 aconnector->base.force != DRM_FORCE_ON) {
6460 DRM_ERROR("dc_sink is NULL!\n");
6464 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6466 dc_stream_release(stream);
6471 /* TODO: error handling */
6475 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6476 struct dc_info_packet *out)
6478 struct hdmi_drm_infoframe frame;
6479 unsigned char buf[30]; /* 26 + 4 */
6483 memset(out, 0, sizeof(*out));
6485 if (!state->hdr_output_metadata)
6488 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6492 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6496 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6500 /* Prepare the infopacket for DC. */
6501 switch (state->connector->connector_type) {
6502 case DRM_MODE_CONNECTOR_HDMIA:
6503 out->hb0 = 0x87; /* type */
6504 out->hb1 = 0x01; /* version */
6505 out->hb2 = 0x1A; /* length */
6506 out->sb[0] = buf[3]; /* checksum */
6510 case DRM_MODE_CONNECTOR_DisplayPort:
6511 case DRM_MODE_CONNECTOR_eDP:
6512 out->hb0 = 0x00; /* sdp id, zero */
6513 out->hb1 = 0x87; /* type */
6514 out->hb2 = 0x1D; /* payload len - 1 */
6515 out->hb3 = (0x13 << 2); /* sdp version */
6516 out->sb[0] = 0x01; /* version */
6517 out->sb[1] = 0x1A; /* length */
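/* The 26 bytes of static metadata start after the 4-byte infoframe header in buf. */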
6525 memcpy(&out->sb[i], &buf[4], 26);
6528 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6529 sizeof(out->sb), false);
6535 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6536 struct drm_atomic_state *state)
6538 struct drm_connector_state *new_con_state =
6539 drm_atomic_get_new_connector_state(state, conn);
6540 struct drm_connector_state *old_con_state =
6541 drm_atomic_get_old_connector_state(state, conn);
6542 struct drm_crtc *crtc = new_con_state->crtc;
6543 struct drm_crtc_state *new_crtc_state;
6546 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6551 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6552 struct dc_info_packet hdr_infopacket;
6554 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6558 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6559 if (IS_ERR(new_crtc_state))
6560 return PTR_ERR(new_crtc_state);
6563 * DC considers the stream backends changed if the
6564 * static metadata changes. Forcing the modeset also
6565 * gives a simple way for userspace to switch from
6566 * 8bpc to 10bpc when setting the metadata to enter
6569 * Changing the static metadata after it's been
6570 * set is permissible, however. So only force a
6571 * modeset if we're entering or exiting HDR.
6573 new_crtc_state->mode_changed =
6574 !old_con_state->hdr_output_metadata ||
6575 !new_con_state->hdr_output_metadata;
6581 static const struct drm_connector_helper_funcs
6582 amdgpu_dm_connector_helper_funcs = {
6584 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6585 * modes will be filtered by drm_mode_validate_size(), and those modes
6586 * are missing after the user starts lightdm. So we need to renew the modes
6587 * list in the get_modes callback, not just return the modes count.
6589 .get_modes = get_modes,
6590 .mode_valid = amdgpu_dm_connector_mode_valid,
6591 .atomic_check = amdgpu_dm_connector_atomic_check,
6594 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6598 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6600 struct drm_atomic_state *state = new_crtc_state->state;
6601 struct drm_plane *plane;
6604 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6605 struct drm_plane_state *new_plane_state;
6607 /* Cursor planes are "fake". */
6608 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6611 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6613 if (!new_plane_state) {
6615 * The plane is enabled on the CRTC and hasn't changed
6616 * state. This means that it previously passed
6617 * validation and is therefore enabled.
6623 /* We need a framebuffer to be considered enabled. */
6624 num_active += (new_plane_state->fb != NULL);
6630 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6631 struct drm_crtc_state *new_crtc_state)
6633 struct dm_crtc_state *dm_new_crtc_state =
6634 to_dm_crtc_state(new_crtc_state);
6636 dm_new_crtc_state->active_planes = 0;
6638 if (!dm_new_crtc_state->stream)
6641 dm_new_crtc_state->active_planes =
6642 count_crtc_active_planes(new_crtc_state);
6645 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6646 struct drm_atomic_state *state)
6648 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6650 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6651 struct dc *dc = adev->dm.dc;
6652 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6655 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6657 dm_update_crtc_active_planes(crtc, crtc_state);
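/* A CRTC that requires a modeset but has no stream attached indicates a driver bug. */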
6659 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6660 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6665 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6666 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6667 * planes are disabled, which is not supported by the hardware. And there is legacy
6668 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6670 if (crtc_state->enable &&
6671 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6672 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6676 /* In some use cases, like reset, no stream is attached */
6677 if (!dm_crtc_state->stream)
6680 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6683 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6687 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6688 const struct drm_display_mode *mode,
6689 struct drm_display_mode *adjusted_mode)
6694 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6695 .disable = dm_crtc_helper_disable,
6696 .atomic_check = dm_crtc_helper_atomic_check,
6697 .mode_fixup = dm_crtc_helper_mode_fixup,
6698 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6701 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6706 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6708 switch (display_color_depth) {
6709 case COLOR_DEPTH_666:
6711 case COLOR_DEPTH_888:
6713 case COLOR_DEPTH_101010:
6715 case COLOR_DEPTH_121212:
6717 case COLOR_DEPTH_141414:
6719 case COLOR_DEPTH_161616:
6727 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6728 struct drm_crtc_state *crtc_state,
6729 struct drm_connector_state *conn_state)
6731 struct drm_atomic_state *state = crtc_state->state;
6732 struct drm_connector *connector = conn_state->connector;
6733 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6734 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6735 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6736 struct drm_dp_mst_topology_mgr *mst_mgr;
6737 struct drm_dp_mst_port *mst_port;
6738 enum dc_color_depth color_depth;
6740 bool is_y420 = false;
6742 if (!aconnector->port || !aconnector->dc_sink)
6745 mst_port = aconnector->port;
6746 mst_mgr = &aconnector->mst_port->mst_mgr;
6748 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6751 if (!state->duplicated) {
6752 int max_bpc = conn_state->max_requested_bpc;
6753 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6754 aconnector->force_yuv420_output;
6755 color_depth = convert_color_depth_from_display_info(connector,
6758 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6759 clock = adjusted_mode->clock;
6760 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6762 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6765 dm_new_connector_state->pbn,
6766 dm_mst_get_pbn_divider(aconnector->dc_link));
6767 if (dm_new_connector_state->vcpi_slots < 0) {
6768 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6769 return dm_new_connector_state->vcpi_slots;
6774 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6775 .disable = dm_encoder_helper_disable,
6776 .atomic_check = dm_encoder_helper_atomic_check
6779 #if defined(CONFIG_DRM_AMD_DC_DCN)
6780 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6781 struct dc_state *dc_state)
6783 struct dc_stream_state *stream = NULL;
6784 struct drm_connector *connector;
6785 struct drm_connector_state *new_con_state;
6786 struct amdgpu_dm_connector *aconnector;
6787 struct dm_connector_state *dm_conn_state;
6788 int i, j, clock, bpp;
6789 int vcpi, pbn_div, pbn = 0;
6791 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6793 aconnector = to_amdgpu_dm_connector(connector);
6795 if (!aconnector->port)
6798 if (!new_con_state || !new_con_state->crtc)
6801 dm_conn_state = to_dm_connector_state(new_con_state);
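/* Find the DC stream that belongs to this connector. */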
6803 for (j = 0; j < dc_state->stream_count; j++) {
6804 stream = dc_state->streams[j];
6808 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6817 if (stream->timing.flags.DSC != 1) {
6818 drm_dp_mst_atomic_enable_dsc(state,
6826 pbn_div = dm_mst_get_pbn_divider(stream->link);
6827 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6828 clock = stream->timing.pix_clk_100hz / 10;
6829 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6830 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6837 dm_conn_state->pbn = pbn;
6838 dm_conn_state->vcpi_slots = vcpi;
6844 static void dm_drm_plane_reset(struct drm_plane *plane)
6846 struct dm_plane_state *amdgpu_state = NULL;
6849 plane->funcs->atomic_destroy_state(plane, plane->state);
6851 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6852 WARN_ON(amdgpu_state == NULL);
6855 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6858 static struct drm_plane_state *
6859 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6861 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6863 old_dm_plane_state = to_dm_plane_state(plane->state);
6864 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6865 if (!dm_plane_state)
6868 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6870 if (old_dm_plane_state->dc_state) {
6871 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6872 dc_plane_state_retain(dm_plane_state->dc_state);
6875 return &dm_plane_state->base;
6878 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6879 struct drm_plane_state *state)
6881 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6883 if (dm_plane_state->dc_state)
6884 dc_plane_state_release(dm_plane_state->dc_state);
6886 drm_atomic_helper_plane_destroy_state(plane, state);
6889 static const struct drm_plane_funcs dm_plane_funcs = {
6890 .update_plane = drm_atomic_helper_update_plane,
6891 .disable_plane = drm_atomic_helper_disable_plane,
6892 .destroy = drm_primary_helper_destroy,
6893 .reset = dm_drm_plane_reset,
6894 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6895 .atomic_destroy_state = dm_drm_plane_destroy_state,
6896 .format_mod_supported = dm_plane_format_mod_supported,
6899 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6900 struct drm_plane_state *new_state)
6902 struct amdgpu_framebuffer *afb;
6903 struct drm_gem_object *obj;
6904 struct amdgpu_device *adev;
6905 struct amdgpu_bo *rbo;
6906 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6907 struct list_head list;
6908 struct ttm_validate_buffer tv;
6909 struct ww_acquire_ctx ticket;
6913 if (!new_state->fb) {
6914 DRM_DEBUG_KMS("No FB bound\n");
6918 afb = to_amdgpu_framebuffer(new_state->fb);
6919 obj = new_state->fb->obj[0];
6920 rbo = gem_to_amdgpu_bo(obj);
6921 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6922 INIT_LIST_HEAD(&list);
6926 list_add(&tv.head, &list);
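/* Reserve the BO before pinning it and setting up its GART mapping. */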
6928 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6930 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6934 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6935 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6937 domain = AMDGPU_GEM_DOMAIN_VRAM;
6939 r = amdgpu_bo_pin(rbo, domain);
6940 if (unlikely(r != 0)) {
6941 if (r != -ERESTARTSYS)
6942 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6943 ttm_eu_backoff_reservation(&ticket, &list);
6947 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6948 if (unlikely(r != 0)) {
6949 amdgpu_bo_unpin(rbo);
6950 ttm_eu_backoff_reservation(&ticket, &list);
6951 DRM_ERROR("%p bind failed\n", rbo);
6955 ttm_eu_backoff_reservation(&ticket, &list);
6957 afb->address = amdgpu_bo_gpu_offset(rbo);
6962 * We don't do surface updates on planes that have been newly created,
6963 * but we also don't have the afb->address during atomic check.
6965 * Fill in buffer attributes depending on the address here, but only on
6966 * newly created planes since they're not being used by DC yet and this
6967 * won't modify global state.
6969 dm_plane_state_old = to_dm_plane_state(plane->state);
6970 dm_plane_state_new = to_dm_plane_state(new_state);
6972 if (dm_plane_state_new->dc_state &&
6973 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6974 struct dc_plane_state *plane_state =
6975 dm_plane_state_new->dc_state;
6976 bool force_disable_dcc = !plane_state->dcc.enable;
6978 fill_plane_buffer_attributes(
6979 adev, afb, plane_state->format, plane_state->rotation,
6981 &plane_state->tiling_info, &plane_state->plane_size,
6982 &plane_state->dcc, &plane_state->address,
6983 afb->tmz_surface, force_disable_dcc);
6989 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6990 struct drm_plane_state *old_state)
6992 struct amdgpu_bo *rbo;
6998 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6999 r = amdgpu_bo_reserve(rbo, false);
7001 DRM_ERROR("failed to reserve rbo before unpin\n");
7005 amdgpu_bo_unpin(rbo);
7006 amdgpu_bo_unreserve(rbo);
7007 amdgpu_bo_unref(&rbo);
7010 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7011 struct drm_crtc_state *new_crtc_state)
7013 struct drm_framebuffer *fb = state->fb;
7014 int min_downscale, max_upscale;
7016 int max_scale = INT_MAX;
7018 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7019 if (fb && state->crtc) {
7020 /* Validate viewport to cover the case when only the position changes */
7021 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7022 int viewport_width = state->crtc_w;
7023 int viewport_height = state->crtc_h;
7025 if (state->crtc_x < 0)
7026 viewport_width += state->crtc_x;
7027 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7028 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7030 if (state->crtc_y < 0)
7031 viewport_height += state->crtc_y;
7032 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7033 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7035 if (viewport_width < 0 || viewport_height < 0) {
7036 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7038 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7039 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7041 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7042 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7048 /* Get min/max allowed scaling factors from plane caps. */
7049 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7050 &min_downscale, &max_upscale);
7052 * Convert to drm convention: 16.16 fixed point, instead of dc's
7053 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7054 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7056 min_scale = (1000 << 16) / max_upscale;
7057 max_scale = (1000 << 16) / min_downscale;
7060 return drm_atomic_helper_check_plane_state(
7061 state, new_crtc_state, min_scale, max_scale, true, true);
7064 static int dm_plane_atomic_check(struct drm_plane *plane,
7065 struct drm_atomic_state *state)
7067 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7069 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7070 struct dc *dc = adev->dm.dc;
7071 struct dm_plane_state *dm_plane_state;
7072 struct dc_scaling_info scaling_info;
7073 struct drm_crtc_state *new_crtc_state;
7076 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7078 dm_plane_state = to_dm_plane_state(new_plane_state);
7080 if (!dm_plane_state->dc_state)
7084 drm_atomic_get_new_crtc_state(state,
7085 new_plane_state->crtc);
7086 if (!new_crtc_state)
7089 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7093 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7097 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7103 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7104 struct drm_atomic_state *state)
7106 /* Only support async updates on cursor planes. */
7107 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7113 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7114 struct drm_atomic_state *state)
7116 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7118 struct drm_plane_state *old_state =
7119 drm_atomic_get_old_plane_state(state, plane);
7121 trace_amdgpu_dm_atomic_update_cursor(new_state);
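/* Async (cursor) update: patch the current plane state in place with the new framebuffer and position. */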
7123 swap(plane->state->fb, new_state->fb);
7125 plane->state->src_x = new_state->src_x;
7126 plane->state->src_y = new_state->src_y;
7127 plane->state->src_w = new_state->src_w;
7128 plane->state->src_h = new_state->src_h;
7129 plane->state->crtc_x = new_state->crtc_x;
7130 plane->state->crtc_y = new_state->crtc_y;
7131 plane->state->crtc_w = new_state->crtc_w;
7132 plane->state->crtc_h = new_state->crtc_h;
7134 handle_cursor_update(plane, old_state);
7137 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7138 .prepare_fb = dm_plane_helper_prepare_fb,
7139 .cleanup_fb = dm_plane_helper_cleanup_fb,
7140 .atomic_check = dm_plane_atomic_check,
7141 .atomic_async_check = dm_plane_atomic_async_check,
7142 .atomic_async_update = dm_plane_atomic_async_update
7146 * TODO: these are currently initialized to RGB formats only.
7147 * For future use cases we should either initialize them dynamically based on
7148 * plane capabilities, or initialize this array to all formats, so the internal
7149 * drm check will succeed, and let DC implement the proper check.
7151 static const uint32_t rgb_formats[] = {
7152 DRM_FORMAT_XRGB8888,
7153 DRM_FORMAT_ARGB8888,
7154 DRM_FORMAT_RGBA8888,
7155 DRM_FORMAT_XRGB2101010,
7156 DRM_FORMAT_XBGR2101010,
7157 DRM_FORMAT_ARGB2101010,
7158 DRM_FORMAT_ABGR2101010,
7159 DRM_FORMAT_XRGB16161616,
7160 DRM_FORMAT_XBGR16161616,
7161 DRM_FORMAT_ARGB16161616,
7162 DRM_FORMAT_ABGR16161616,
7163 DRM_FORMAT_XBGR8888,
7164 DRM_FORMAT_ABGR8888,
7168 static const uint32_t overlay_formats[] = {
7169 DRM_FORMAT_XRGB8888,
7170 DRM_FORMAT_ARGB8888,
7171 DRM_FORMAT_RGBA8888,
7172 DRM_FORMAT_XBGR8888,
7173 DRM_FORMAT_ABGR8888,
7177 static const u32 cursor_formats[] = {
7181 static int get_plane_formats(const struct drm_plane *plane,
7182 const struct dc_plane_cap *plane_cap,
7183 uint32_t *formats, int max_formats)
7185 int i, num_formats = 0;
7188 * TODO: Query support for each group of formats directly from
7189 * DC plane caps. This will require adding more formats to the
7193 switch (plane->type) {
7194 case DRM_PLANE_TYPE_PRIMARY:
7195 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7196 if (num_formats >= max_formats)
7199 formats[num_formats++] = rgb_formats[i];
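/* Advertise YUV and FP16 formats only when the DC plane caps report support. */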
7202 if (plane_cap && plane_cap->pixel_format_support.nv12)
7203 formats[num_formats++] = DRM_FORMAT_NV12;
7204 if (plane_cap && plane_cap->pixel_format_support.p010)
7205 formats[num_formats++] = DRM_FORMAT_P010;
7206 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7207 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7208 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7209 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7210 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7214 case DRM_PLANE_TYPE_OVERLAY:
7215 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7216 if (num_formats >= max_formats)
7219 formats[num_formats++] = overlay_formats[i];
7223 case DRM_PLANE_TYPE_CURSOR:
7224 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7225 if (num_formats >= max_formats)
7228 formats[num_formats++] = cursor_formats[i];
7236 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7237 struct drm_plane *plane,
7238 unsigned long possible_crtcs,
7239 const struct dc_plane_cap *plane_cap)
7241 uint32_t formats[32];
7244 unsigned int supported_rotations;
7245 uint64_t *modifiers = NULL;
7247 num_formats = get_plane_formats(plane, plane_cap, formats,
7248 ARRAY_SIZE(formats));
7250 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7254 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7255 &dm_plane_funcs, formats, num_formats,
7256 modifiers, plane->type, NULL);
7261 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7262 plane_cap && plane_cap->per_pixel_alpha) {
7263 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7264 BIT(DRM_MODE_BLEND_PREMULTI);
7266 drm_plane_create_alpha_property(plane);
7267 drm_plane_create_blend_mode_property(plane, blend_caps);
7270 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7272 (plane_cap->pixel_format_support.nv12 ||
7273 plane_cap->pixel_format_support.p010)) {
7274 /* This only affects YUV formats. */
7275 drm_plane_create_color_properties(
7277 BIT(DRM_COLOR_YCBCR_BT601) |
7278 BIT(DRM_COLOR_YCBCR_BT709) |
7279 BIT(DRM_COLOR_YCBCR_BT2020),
7280 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7281 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7282 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
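/* Attach the rotation property on ASICs from Bonaire onward, except for cursor planes. */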
7285 supported_rotations =
7286 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7287 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7289 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7290 plane->type != DRM_PLANE_TYPE_CURSOR)
7291 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7292 supported_rotations);
7294 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7296 /* Create (reset) the plane state */
7297 if (plane->funcs->reset)
7298 plane->funcs->reset(plane);
7303 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7304 struct drm_plane *plane,
7305 uint32_t crtc_index)
7307 struct amdgpu_crtc *acrtc = NULL;
7308 struct drm_plane *cursor_plane;
7312 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7316 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7317 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7319 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7323 res = drm_crtc_init_with_planes(
7328 &amdgpu_dm_crtc_funcs, NULL);
7333 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7335 /* Create (reset) the plane state */
7336 if (acrtc->base.funcs->reset)
7337 acrtc->base.funcs->reset(&acrtc->base);
7339 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7340 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7342 acrtc->crtc_id = crtc_index;
7343 acrtc->base.enabled = false;
7344 acrtc->otg_inst = -1;
7346 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7347 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7348 true, MAX_COLOR_LUT_ENTRIES);
7349 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7355 kfree(cursor_plane);
7360 static int to_drm_connector_type(enum signal_type st)
7363 case SIGNAL_TYPE_HDMI_TYPE_A:
7364 return DRM_MODE_CONNECTOR_HDMIA;
7365 case SIGNAL_TYPE_EDP:
7366 return DRM_MODE_CONNECTOR_eDP;
7367 case SIGNAL_TYPE_LVDS:
7368 return DRM_MODE_CONNECTOR_LVDS;
7369 case SIGNAL_TYPE_RGB:
7370 return DRM_MODE_CONNECTOR_VGA;
7371 case SIGNAL_TYPE_DISPLAY_PORT:
7372 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7373 return DRM_MODE_CONNECTOR_DisplayPort;
7374 case SIGNAL_TYPE_DVI_DUAL_LINK:
7375 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7376 return DRM_MODE_CONNECTOR_DVID;
7377 case SIGNAL_TYPE_VIRTUAL:
7378 return DRM_MODE_CONNECTOR_VIRTUAL;
7381 return DRM_MODE_CONNECTOR_Unknown;
7385 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7387 struct drm_encoder *encoder;
7389 /* There is only one encoder per connector */
7390 drm_connector_for_each_possible_encoder(connector, encoder)
7396 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7398 struct drm_encoder *encoder;
7399 struct amdgpu_encoder *amdgpu_encoder;
7401 encoder = amdgpu_dm_connector_to_encoder(connector);
7403 if (encoder == NULL)
7406 amdgpu_encoder = to_amdgpu_encoder(encoder);
7408 amdgpu_encoder->native_mode.clock = 0;
7410 if (!list_empty(&connector->probed_modes)) {
7411 struct drm_display_mode *preferred_mode = NULL;
7413 list_for_each_entry(preferred_mode,
7414 &connector->probed_modes,
7416 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7417 amdgpu_encoder->native_mode = *preferred_mode;
7425 static struct drm_display_mode *
7426 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7428 int hdisplay, int vdisplay)
7430 struct drm_device *dev = encoder->dev;
7431 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7432 struct drm_display_mode *mode = NULL;
7433 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7435 mode = drm_mode_duplicate(dev, native_mode);
7440 mode->hdisplay = hdisplay;
7441 mode->vdisplay = vdisplay;
7442 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7443 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7449 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7450 struct drm_connector *connector)
7452 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7453 struct drm_display_mode *mode = NULL;
7454 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7455 struct amdgpu_dm_connector *amdgpu_dm_connector =
7456 to_amdgpu_dm_connector(connector);
7460 char name[DRM_DISPLAY_MODE_LEN];
7463 } common_modes[] = {
7464 { "640x480", 640, 480},
7465 { "800x600", 800, 600},
7466 { "1024x768", 1024, 768},
7467 { "1280x720", 1280, 720},
7468 { "1280x800", 1280, 800},
7469 {"1280x1024", 1280, 1024},
7470 { "1440x900", 1440, 900},
7471 {"1680x1050", 1680, 1050},
7472 {"1600x1200", 1600, 1200},
7473 {"1920x1080", 1920, 1080},
7474 {"1920x1200", 1920, 1200}
7477 n = ARRAY_SIZE(common_modes);
7479 for (i = 0; i < n; i++) {
7480 struct drm_display_mode *curmode = NULL;
7481 bool mode_existed = false;
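/* Skip common modes that are larger than, or identical to, the native mode. */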
7483 if (common_modes[i].w > native_mode->hdisplay ||
7484 common_modes[i].h > native_mode->vdisplay ||
7485 (common_modes[i].w == native_mode->hdisplay &&
7486 common_modes[i].h == native_mode->vdisplay))
7489 list_for_each_entry(curmode, &connector->probed_modes, head) {
7490 if (common_modes[i].w == curmode->hdisplay &&
7491 common_modes[i].h == curmode->vdisplay) {
7492 mode_existed = true;
7500 mode = amdgpu_dm_create_common_mode(encoder,
7501 common_modes[i].name, common_modes[i].w,
7503 drm_mode_probed_add(connector, mode);
7504 amdgpu_dm_connector->num_modes++;
7508 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7511 struct amdgpu_dm_connector *amdgpu_dm_connector =
7512 to_amdgpu_dm_connector(connector);
7515 /* empty probed_modes */
7516 INIT_LIST_HEAD(&connector->probed_modes);
7517 amdgpu_dm_connector->num_modes =
7518 drm_add_edid_modes(connector, edid);
7520 /* Sort the probed modes before calling
7521 * amdgpu_dm_get_native_mode(), since the EDID can have
7522 * more than one preferred mode. Modes that appear
7523 * later in the probed mode list could have a higher,
7524 * preferred resolution: for example, a 3840x2160
7525 * preferred timing in the base EDID and a 4096x2160
7526 * preferred resolution in a later DID extension block.
7528 drm_mode_sort(&connector->probed_modes);
7529 amdgpu_dm_get_native_mode(connector);
7531 /* Freesync capabilities are reset by calling
7532 * drm_add_edid_modes() and need to be
7535 amdgpu_dm_update_freesync_caps(connector, edid);
7537 amdgpu_dm_connector->num_modes = 0;
7541 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7542 struct drm_display_mode *mode)
7544 struct drm_display_mode *m;
7546 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7547 if (drm_mode_equal(m, mode))
7554 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7556 const struct drm_display_mode *m;
7557 struct drm_display_mode *new_mode;
7559 uint32_t new_modes_count = 0;
7561 /* Standard FPS values
7570 * 60 - Commonly used
7571 * 48,72,96 - Multiples of 24
7573 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7574 48000, 50000, 60000, 72000, 96000 };
7577 * Find mode with highest refresh rate with the same resolution
7578 * as the preferred mode. Some monitors report a preferred mode
7579 * with lower resolution than the highest refresh rate supported.
7582 m = get_highest_refresh_rate_mode(aconnector, true);
7586 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7587 uint64_t target_vtotal, target_vtotal_diff;
7590 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7593 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7594 common_rates[i] > aconnector->max_vfreq * 1000)
7597 num = (unsigned long long)m->clock * 1000 * 1000;
7598 den = common_rates[i] * (unsigned long long)m->htotal;
7599 target_vtotal = div_u64(num, den);
7600 target_vtotal_diff = target_vtotal - m->vtotal;
7602 /* Check for illegal modes */
7603 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7604 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7605 m->vtotal + target_vtotal_diff < m->vsync_end)
7608 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7612 new_mode->vtotal += (u16)target_vtotal_diff;
7613 new_mode->vsync_start += (u16)target_vtotal_diff;
7614 new_mode->vsync_end += (u16)target_vtotal_diff;
7615 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7616 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7618 if (!is_duplicate_mode(aconnector, new_mode)) {
7619 drm_mode_probed_add(&aconnector->base, new_mode);
7620 new_modes_count += 1;
7622 drm_mode_destroy(aconnector->base.dev, new_mode);
7625 return new_modes_count;
7628 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7631 struct amdgpu_dm_connector *amdgpu_dm_connector =
7632 to_amdgpu_dm_connector(connector);
7634 if (!(amdgpu_freesync_vid_mode && edid))
7637 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7638 amdgpu_dm_connector->num_modes +=
7639 add_fs_modes(amdgpu_dm_connector);
7642 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7644 struct amdgpu_dm_connector *amdgpu_dm_connector =
7645 to_amdgpu_dm_connector(connector);
7646 struct drm_encoder *encoder;
7647 struct edid *edid = amdgpu_dm_connector->edid;
7649 encoder = amdgpu_dm_connector_to_encoder(connector);
7651 if (!drm_edid_is_valid(edid)) {
7652 amdgpu_dm_connector->num_modes =
7653 drm_add_modes_noedid(connector, 640, 480);
7655 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7656 amdgpu_dm_connector_add_common_modes(encoder, connector);
7657 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7659 amdgpu_dm_fbc_init(connector);
7661 return amdgpu_dm_connector->num_modes;
7664 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7665 struct amdgpu_dm_connector *aconnector,
7667 struct dc_link *link,
7670 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7673 * Some of the properties below require access to state, like bpc.
7674 * Allocate some default initial connector state with our reset helper.
7676 if (aconnector->base.funcs->reset)
7677 aconnector->base.funcs->reset(&aconnector->base);
7679 aconnector->connector_id = link_index;
7680 aconnector->dc_link = link;
7681 aconnector->base.interlace_allowed = false;
7682 aconnector->base.doublescan_allowed = false;
7683 aconnector->base.stereo_allowed = false;
7684 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7685 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7686 aconnector->audio_inst = -1;
7687 mutex_init(&aconnector->hpd_lock);
7690 * Configure HPD hot plug support: connector->polled defaults to 0,
7691 * which means HPD hot plug is not supported.
7693 switch (connector_type) {
7694 case DRM_MODE_CONNECTOR_HDMIA:
7695 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7696 aconnector->base.ycbcr_420_allowed =
7697 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7699 case DRM_MODE_CONNECTOR_DisplayPort:
7700 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7701 aconnector->base.ycbcr_420_allowed =
7702 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7704 case DRM_MODE_CONNECTOR_DVID:
7705 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7711 drm_object_attach_property(&aconnector->base.base,
7712 dm->ddev->mode_config.scaling_mode_property,
7713 DRM_MODE_SCALE_NONE);
7715 drm_object_attach_property(&aconnector->base.base,
7716 adev->mode_info.underscan_property,
7718 drm_object_attach_property(&aconnector->base.base,
7719 adev->mode_info.underscan_hborder_property,
7721 drm_object_attach_property(&aconnector->base.base,
7722 adev->mode_info.underscan_vborder_property,
7725 if (!aconnector->mst_port)
7726 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7728 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7729 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7730 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7732 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7733 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7734 drm_object_attach_property(&aconnector->base.base,
7735 adev->mode_info.abm_level_property, 0);
7738 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7739 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7740 connector_type == DRM_MODE_CONNECTOR_eDP) {
7741 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7743 if (!aconnector->mst_port)
7744 drm_connector_attach_vrr_capable_property(&aconnector->base);
7746 #ifdef CONFIG_DRM_AMD_DC_HDCP
7747 if (adev->dm.hdcp_workqueue)
7748 drm_connector_attach_content_protection_property(&aconnector->base, true);
7753 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7754 struct i2c_msg *msgs, int num)
7756 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7757 struct ddc_service *ddc_service = i2c->ddc_service;
7758 struct i2c_command cmd;
7762 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7767 cmd.number_of_payloads = num;
7768 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
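/* Translate each i2c_msg into a DC i2c payload. */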
7771 for (i = 0; i < num; i++) {
7772 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7773 cmd.payloads[i].address = msgs[i].addr;
7774 cmd.payloads[i].length = msgs[i].len;
7775 cmd.payloads[i].data = msgs[i].buf;
7779 ddc_service->ctx->dc,
7780 ddc_service->ddc_pin->hw_info.ddc_channel,
7784 kfree(cmd.payloads);
7788 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7790 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7793 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7794 .master_xfer = amdgpu_dm_i2c_xfer,
7795 .functionality = amdgpu_dm_i2c_func,
7798 static struct amdgpu_i2c_adapter *
7799 create_i2c(struct ddc_service *ddc_service,
7803 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7804 struct amdgpu_i2c_adapter *i2c;
7806 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7809 i2c->base.owner = THIS_MODULE;
7810 i2c->base.class = I2C_CLASS_DDC;
7811 i2c->base.dev.parent = &adev->pdev->dev;
7812 i2c->base.algo = &amdgpu_dm_i2c_algo;
7813 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7814 i2c_set_adapdata(&i2c->base, i2c);
7815 i2c->ddc_service = ddc_service;
7816 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7823 * Note: this function assumes that dc_link_detect() was called for the
7824 * dc_link which will be represented by this aconnector.
7826 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7827 struct amdgpu_dm_connector *aconnector,
7828 uint32_t link_index,
7829 struct amdgpu_encoder *aencoder)
7833 struct dc *dc = dm->dc;
7834 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7835 struct amdgpu_i2c_adapter *i2c;
7837 link->priv = aconnector;
7839 DRM_DEBUG_DRIVER("%s()\n", __func__);
7841 i2c = create_i2c(link->ddc, link->link_index, &res);
7843 DRM_ERROR("Failed to create i2c adapter data\n");
7847 aconnector->i2c = i2c;
7848 res = i2c_add_adapter(&i2c->base);
7851 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7855 connector_type = to_drm_connector_type(link->connector_signal);
7857 res = drm_connector_init_with_ddc(
7860 &amdgpu_dm_connector_funcs,
7865 DRM_ERROR("connector_init failed\n");
7866 aconnector->connector_id = -1;
7870 drm_connector_helper_add(
7872 &amdgpu_dm_connector_helper_funcs);
7874 amdgpu_dm_connector_init_helper(
7881 drm_connector_attach_encoder(
7882 &aconnector->base, &aencoder->base);
7884 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7885 || connector_type == DRM_MODE_CONNECTOR_eDP)
7886 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7891 aconnector->i2c = NULL;
7896 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7898 switch (adev->mode_info.num_crtc) {
7915 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7916 struct amdgpu_encoder *aencoder,
7917 uint32_t link_index)
7919 struct amdgpu_device *adev = drm_to_adev(dev);
7921 int res = drm_encoder_init(dev,
7923 &amdgpu_dm_encoder_funcs,
7924 DRM_MODE_ENCODER_TMDS,
7927 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7930 aencoder->encoder_id = link_index;
7932 aencoder->encoder_id = -1;
7934 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7939 static void manage_dm_interrupts(struct amdgpu_device *adev,
7940 struct amdgpu_crtc *acrtc,
7944 * We have no guarantee that the frontend index maps to the same
7945 * backend index - some even map to more than one.
7947 * TODO: Use a different interrupt or check DC itself for the mapping.
7950 amdgpu_display_crtc_idx_to_irq_type(
7955 drm_crtc_vblank_on(&acrtc->base);
7958 &adev->pageflip_irq,
7960 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7967 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7975 &adev->pageflip_irq,
7977 drm_crtc_vblank_off(&acrtc->base);
7981 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7982 struct amdgpu_crtc *acrtc)
7985 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7988 * This reads the current state for the IRQ and forcibly reapplies
7989 * the setting to the hardware.
7991 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7995 is_scaling_state_different(const struct dm_connector_state *dm_state,
7996 const struct dm_connector_state *old_dm_state)
7998 if (dm_state->scaling != old_dm_state->scaling)
8000 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8001 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8003 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8004 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8006 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8007 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8012 #ifdef CONFIG_DRM_AMD_DC_HDCP
8013 static bool is_content_protection_different(struct drm_connector_state *state,
8014 const struct drm_connector_state *old_state,
8015 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8017 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8018 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8020 /* Handle: Type0/1 change */
8021 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8022 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8023 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8027 /* CP is being re-enabled, ignore this
8029 * Handles: ENABLED -> DESIRED
8031 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8032 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8033 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8037 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8039 * Handles: UNDESIRED -> ENABLED
8041 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8042 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8043 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8045 /* Check if something is connected or enabled; otherwise we would start HDCP
8046 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
8048 * Handles: DESIRED -> DESIRED (Special case)
8050 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8051 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8052 dm_con_state->update_hdcp = false;
8057 * Handles: UNDESIRED -> UNDESIRED
8058 * DESIRED -> DESIRED
8059 * ENABLED -> ENABLED
8061 if (old_state->content_protection == state->content_protection)
8065 * Handles: UNDESIRED -> DESIRED
8066 * DESIRED -> UNDESIRED
8067 * ENABLED -> UNDESIRED
8069 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8073 * Handles: DESIRED -> ENABLED
8079 static void remove_stream(struct amdgpu_device *adev,
8080 struct amdgpu_crtc *acrtc,
8081 struct dc_stream_state *stream)
8083 /* this is the update mode case */
8085 acrtc->otg_inst = -1;
8086 acrtc->enabled = false;
8089 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8090 struct dc_cursor_position *position)
8092 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8094 int xorigin = 0, yorigin = 0;
8096 if (!crtc || !plane->state->fb)
8099 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8100 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8101 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8103 plane->state->crtc_w,
8104 plane->state->crtc_h);
8108 x = plane->state->crtc_x;
8109 y = plane->state->crtc_y;
8111 if (x <= -amdgpu_crtc->max_cursor_width ||
8112 y <= -amdgpu_crtc->max_cursor_height)
8116 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8120 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8123 position->enable = true;
8124 position->translate_by_source = true;
8127 position->x_hotspot = xorigin;
8128 position->y_hotspot = yorigin;
8133 static void handle_cursor_update(struct drm_plane *plane,
8134 struct drm_plane_state *old_plane_state)
8136 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8137 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8138 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8139 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8140 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8141 uint64_t address = afb ? afb->address : 0;
8142 struct dc_cursor_position position = {0};
8143 struct dc_cursor_attributes attributes;
8146 if (!plane->state->fb && !old_plane_state->fb)
8149 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8151 amdgpu_crtc->crtc_id,
8152 plane->state->crtc_w,
8153 plane->state->crtc_h);
8155 ret = get_cursor_position(plane, crtc, &position);
8159 if (!position.enable) {
8160 /* turn off cursor */
8161 if (crtc_state && crtc_state->stream) {
8162 mutex_lock(&adev->dm.dc_lock);
8163 dc_stream_set_cursor_position(crtc_state->stream,
8165 mutex_unlock(&adev->dm.dc_lock);
8170 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8171 amdgpu_crtc->cursor_height = plane->state->crtc_h;
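/* Program the cursor surface attributes (address, size, format) before updating its position. */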
8173 memset(&attributes, 0, sizeof(attributes));
8174 attributes.address.high_part = upper_32_bits(address);
8175 attributes.address.low_part = lower_32_bits(address);
8176 attributes.width = plane->state->crtc_w;
8177 attributes.height = plane->state->crtc_h;
8178 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8179 attributes.rotation_angle = 0;
8180 attributes.attribute_flags.value = 0;
8182 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8184 if (crtc_state->stream) {
8185 mutex_lock(&adev->dm.dc_lock);
8186 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8188 DRM_ERROR("DC failed to set cursor attributes\n");
8190 if (!dc_stream_set_cursor_position(crtc_state->stream,
8192 DRM_ERROR("DC failed to set cursor position\n");
8193 mutex_unlock(&adev->dm.dc_lock);
8197 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8200 assert_spin_locked(&acrtc->base.dev->event_lock);
8201 WARN_ON(acrtc->event);
8203 acrtc->event = acrtc->base.state->event;
8205 /* Set the flip status */
8206 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8208 /* Mark this event as consumed */
8209 acrtc->base.state->event = NULL;
8211 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8215 static void update_freesync_state_on_stream(
8216 struct amdgpu_display_manager *dm,
8217 struct dm_crtc_state *new_crtc_state,
8218 struct dc_stream_state *new_stream,
8219 struct dc_plane_state *surface,
8220 u32 flip_timestamp_in_us)
8222 struct mod_vrr_params vrr_params;
8223 struct dc_info_packet vrr_infopacket = {0};
8224 struct amdgpu_device *adev = dm->adev;
8225 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8226 unsigned long flags;
8227 bool pack_sdp_v1_3 = false;
8233 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8234 * For now it's sufficient to just guard against these conditions.
8237 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8240 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8241 vrr_params = acrtc->dm_irq_params.vrr_params;
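/* Let the freesync module update the VRR state for this flip. */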
8244 mod_freesync_handle_preflip(
8245 dm->freesync_module,
8248 flip_timestamp_in_us,
8251 if (adev->family < AMDGPU_FAMILY_AI &&
8252 amdgpu_dm_vrr_active(new_crtc_state)) {
8253 mod_freesync_handle_v_update(dm->freesync_module,
8254 new_stream, &vrr_params);
8256 /* Need to call this before the frame ends. */
8257 dc_stream_adjust_vmin_vmax(dm->dc,
8258 new_crtc_state->stream,
8259 &vrr_params.adjust);
8263 mod_freesync_build_vrr_infopacket(
8264 dm->freesync_module,
8268 TRANSFER_FUNC_UNKNOWN,
8272 new_crtc_state->freesync_timing_changed |=
8273 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8275 sizeof(vrr_params.adjust)) != 0);
8277 new_crtc_state->freesync_vrr_info_changed |=
8278 (memcmp(&new_crtc_state->vrr_infopacket,
8280 sizeof(vrr_infopacket)) != 0);
8282 acrtc->dm_irq_params.vrr_params = vrr_params;
8283 new_crtc_state->vrr_infopacket = vrr_infopacket;
8285 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8286 new_stream->vrr_infopacket = vrr_infopacket;
8288 if (new_crtc_state->freesync_vrr_info_changed)
8289 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8290 new_crtc_state->base.crtc->base.id,
8291 (int)new_crtc_state->base.vrr_enabled,
8292 (int)vrr_params.state);
8294 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
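/*
 * Rebuild the FreeSync/VRR parameters from the new CRTC state and copy
 * them, along with the freesync config and active plane count, into
 * dm_irq_params so the vblank/pflip IRQ handlers see a consistent
 * snapshot under the event_lock.
 */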
8297 static void update_stream_irq_parameters(
8298 struct amdgpu_display_manager *dm,
8299 struct dm_crtc_state *new_crtc_state)
8301 struct dc_stream_state *new_stream = new_crtc_state->stream;
8302 struct mod_vrr_params vrr_params;
8303 struct mod_freesync_config config = new_crtc_state->freesync_config;
8304 struct amdgpu_device *adev = dm->adev;
8305 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8306 unsigned long flags;
8312 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8313 * For now it's sufficient to just guard against these conditions.
8315 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8318 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8319 vrr_params = acrtc->dm_irq_params.vrr_params;
8321 if (new_crtc_state->vrr_supported &&
8322 config.min_refresh_in_uhz &&
8323 config.max_refresh_in_uhz) {
8325 * if freesync compatible mode was set, config.state will be set
8328 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8329 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8330 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8331 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8332 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8333 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8334 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8336 config.state = new_crtc_state->base.vrr_enabled ?
8337 VRR_STATE_ACTIVE_VARIABLE :
8341 config.state = VRR_STATE_UNSUPPORTED;
8344 mod_freesync_build_vrr_params(dm->freesync_module,
8346 &config, &vrr_params);
8348 new_crtc_state->freesync_timing_changed |=
8349 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8350 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8352 new_crtc_state->freesync_config = config;
8353 /* Copy state for access from DM IRQ handler */
8354 acrtc->dm_irq_params.freesync_config = config;
8355 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8356 acrtc->dm_irq_params.vrr_params = vrr_params;
8357 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
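/*
 * Handle VRR on/off transitions: when VRR becomes active, enable the
 * vupdate interrupt and take a vblank reference so vblank IRQs stay on;
 * when it becomes inactive, drop both again.
 */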
8360 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8361 struct dm_crtc_state *new_state)
8363 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8364 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8366 if (!old_vrr_active && new_vrr_active) {
8367 /* Transition VRR inactive -> active:
8368 * While VRR is active, we must not disable vblank irq, as a
8369 * reenable after disable would compute bogus vblank/pflip
8370 * timestamps if the re-enable happens inside the display front porch, as it likely would.
8372 * We also need vupdate irq for the actual core vblank handling
8375 dm_set_vupdate_irq(new_state->base.crtc, true);
8376 drm_crtc_vblank_get(new_state->base.crtc);
8377 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8378 __func__, new_state->base.crtc->base.id);
8379 } else if (old_vrr_active && !new_vrr_active) {
8380 /* Transition VRR active -> inactive:
8381 * Allow vblank irq disable again for fixed refresh rate.
8383 dm_set_vupdate_irq(new_state->base.crtc, false);
8384 drm_crtc_vblank_put(new_state->base.crtc);
8385 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8386 __func__, new_state->base.crtc->base.id);
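/* Flush a cursor update for every cursor plane present in the atomic state. */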
8390 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8392 struct drm_plane *plane;
8393 struct drm_plane_state *old_plane_state;
8397 * TODO: Make this per-stream so we don't issue redundant updates for
8398 * commits with multiple streams.
8400 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8401 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8402 handle_cursor_update(plane, old_plane_state);
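/*
 * Program all plane updates for one CRTC: build a surface-update bundle,
 * wait on framebuffer fences, throttle flips against the target vblank,
 * arm the pageflip event and submit everything to DC via a single
 * dc_commit_updates_for_stream() call, handling PSR around the update.
 */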
8405 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8406 struct dc_state *dc_state,
8407 struct drm_device *dev,
8408 struct amdgpu_display_manager *dm,
8409 struct drm_crtc *pcrtc,
8410 bool wait_for_vblank)
8413 uint64_t timestamp_ns;
8414 struct drm_plane *plane;
8415 struct drm_plane_state *old_plane_state, *new_plane_state;
8416 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8417 struct drm_crtc_state *new_pcrtc_state =
8418 drm_atomic_get_new_crtc_state(state, pcrtc);
8419 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8420 struct dm_crtc_state *dm_old_crtc_state =
8421 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8422 int planes_count = 0, vpos, hpos;
8424 unsigned long flags;
8425 struct amdgpu_bo *abo;
8426 uint32_t target_vblank, last_flip_vblank;
8427 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8428 bool pflip_present = false;
8430 struct dc_surface_update surface_updates[MAX_SURFACES];
8431 struct dc_plane_info plane_infos[MAX_SURFACES];
8432 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8433 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8434 struct dc_stream_update stream_update;
8437 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8440 dm_error("Failed to allocate update bundle\n");
8445 * Disable the cursor first if we're disabling all the planes.
8446 * It'll remain on the screen after the planes are re-enabled if we don't.
8449 if (acrtc_state->active_planes == 0)
8450 amdgpu_dm_commit_cursors(state);
8452 /* update planes when needed */
8453 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8454 struct drm_crtc *crtc = new_plane_state->crtc;
8455 struct drm_crtc_state *new_crtc_state;
8456 struct drm_framebuffer *fb = new_plane_state->fb;
8457 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8458 bool plane_needs_flip;
8459 struct dc_plane_state *dc_plane;
8460 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8462 /* Cursor plane is handled after stream updates */
8463 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8466 if (!fb || !crtc || pcrtc != crtc)
8469 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8470 if (!new_crtc_state->active)
8473 dc_plane = dm_new_plane_state->dc_state;
8475 bundle->surface_updates[planes_count].surface = dc_plane;
8476 if (new_pcrtc_state->color_mgmt_changed) {
8477 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8478 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8479 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8482 fill_dc_scaling_info(new_plane_state,
8483 &bundle->scaling_infos[planes_count]);
8485 bundle->surface_updates[planes_count].scaling_info =
8486 &bundle->scaling_infos[planes_count];
8488 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8490 pflip_present = pflip_present || plane_needs_flip;
8492 if (!plane_needs_flip) {
8497 abo = gem_to_amdgpu_bo(fb->obj[0]);
8500 * Wait for all fences on this FB. Do limited wait to avoid
8501 * deadlock during GPU reset when this fence will not signal
8502 * but we hold reservation lock for the BO.
8504 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8505 msecs_to_jiffies(5000));
8506 if (unlikely(r <= 0))
8507 DRM_ERROR("Waiting for fences timed out!");
8509 fill_dc_plane_info_and_addr(
8510 dm->adev, new_plane_state,
8512 &bundle->plane_infos[planes_count],
8513 &bundle->flip_addrs[planes_count].address,
8514 afb->tmz_surface, false);
8516 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8517 new_plane_state->plane->index,
8518 bundle->plane_infos[planes_count].dcc.enable);
8520 bundle->surface_updates[planes_count].plane_info =
8521 &bundle->plane_infos[planes_count];
8524 * Only allow immediate flips for fast updates that don't
8525 * change FB pitch, DCC state, rotation or mirroring.
8527 bundle->flip_addrs[planes_count].flip_immediate =
8528 crtc->state->async_flip &&
8529 acrtc_state->update_type == UPDATE_TYPE_FAST;
8531 timestamp_ns = ktime_get_ns();
8532 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8533 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8534 bundle->surface_updates[planes_count].surface = dc_plane;
8536 if (!bundle->surface_updates[planes_count].surface) {
8537 DRM_ERROR("No surface for CRTC: id=%d\n",
8538 acrtc_attach->crtc_id);
8542 if (plane == pcrtc->primary)
8543 update_freesync_state_on_stream(
8546 acrtc_state->stream,
8548 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8550 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8552 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8553 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8559 if (pflip_present) {
8561 /* Use old throttling in non-vrr fixed refresh rate mode
8562 * to keep flip scheduling based on target vblank counts
8563 * working in a backwards compatible way, e.g., for
8564 * clients using the GLX_OML_sync_control extension or
8565 * DRI3/Present extension with defined target_msc.
8567 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8570 /* For variable refresh rate mode only:
8571 * Get vblank of last completed flip to avoid > 1 vrr
8572 * flips per video frame by use of throttling, but allow
8573 * flip programming anywhere in the possibly large
8574 * variable vrr vblank interval for fine-grained flip
8575 * timing control and more opportunity to avoid stutter
8576 * on late submission of flips.
8578 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8579 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8580 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8583 target_vblank = last_flip_vblank + wait_for_vblank;
8586 * Wait until we're out of the vertical blank period before the one
8587 * targeted by the flip
8589 while ((acrtc_attach->enabled &&
8590 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8591 0, &vpos, &hpos, NULL,
8592 NULL, &pcrtc->hwmode)
8593 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8594 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8595 (int)(target_vblank -
8596 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8597 usleep_range(1000, 1100);
8601 * Prepare the flip event for the pageflip interrupt to handle.
8603 * This only works in the case where we've already turned on the
8604 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8605 * from 0 -> n planes we have to skip a hardware generated event
8606 * and rely on sending it from software.
8608 if (acrtc_attach->base.state->event &&
8609 acrtc_state->active_planes > 0) {
8610 drm_crtc_vblank_get(pcrtc);
8612 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8614 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8615 prepare_flip_isr(acrtc_attach);
8617 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8620 if (acrtc_state->stream) {
8621 if (acrtc_state->freesync_vrr_info_changed)
8622 bundle->stream_update.vrr_infopacket =
8623 &acrtc_state->stream->vrr_infopacket;
8627 /* Update the planes if changed or disable if we don't have any. */
8628 if ((planes_count || acrtc_state->active_planes == 0) &&
8629 acrtc_state->stream) {
8630 bundle->stream_update.stream = acrtc_state->stream;
8631 if (new_pcrtc_state->mode_changed) {
8632 bundle->stream_update.src = acrtc_state->stream->src;
8633 bundle->stream_update.dst = acrtc_state->stream->dst;
8636 if (new_pcrtc_state->color_mgmt_changed) {
8638 * TODO: This isn't fully correct since we've actually
8639 * already modified the stream in place.
8641 bundle->stream_update.gamut_remap =
8642 &acrtc_state->stream->gamut_remap_matrix;
8643 bundle->stream_update.output_csc_transform =
8644 &acrtc_state->stream->csc_color_matrix;
8645 bundle->stream_update.out_transfer_func =
8646 acrtc_state->stream->out_transfer_func;
8649 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8650 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8651 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8654 * If FreeSync state on the stream has changed then we need to
8655 * re-adjust the min/max bounds now that DC doesn't handle this
8656 * as part of commit.
8658 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8659 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8660 dc_stream_adjust_vmin_vmax(
8661 dm->dc, acrtc_state->stream,
8662 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8663 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8665 mutex_lock(&dm->dc_lock);
8666 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8667 acrtc_state->stream->link->psr_settings.psr_allow_active)
8668 amdgpu_dm_psr_disable(acrtc_state->stream);
8670 dc_commit_updates_for_stream(dm->dc,
8671 bundle->surface_updates,
8673 acrtc_state->stream,
8674 &bundle->stream_update,
8678 * Enable or disable the interrupts on the backend.
8680 * Most pipes are put into power gating when unused.
8682 * When power gating is enabled on a pipe we lose the
8683 * interrupt enablement state when power gating is disabled.
8685 * So we need to update the IRQ control state in hardware
8686 * whenever the pipe turns on (since it could be previously
8687 * power gated) or off (since some pipes can't be power gated
8690 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8691 dm_update_pflip_irq_state(drm_to_adev(dev),
8694 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8695 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8696 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8697 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8698 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8699 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8700 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8701 struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
8702 acrtc_state->stream->dm_stream_context;
8704 if (aconn->psr_skip_count > 0)
8705 aconn->psr_skip_count--;
8707 amdgpu_dm_psr_enable(acrtc_state->stream);
8710 mutex_unlock(&dm->dc_lock);
8714 * Update cursor state *after* programming all the planes.
8715 * This avoids redundant programming in the case where we're going
8716 * to be disabling a single plane - those pipes are being disabled.
8718 if (acrtc_state->active_planes)
8719 amdgpu_dm_commit_cursors(state);
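/*
 * Notify the audio component about connectors that lost or gained an
 * audio-capable stream in this commit, so the ELD/audio instance mapping
 * stays in sync with the enabled displays.
 */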
8725 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8726 struct drm_atomic_state *state)
8728 struct amdgpu_device *adev = drm_to_adev(dev);
8729 struct amdgpu_dm_connector *aconnector;
8730 struct drm_connector *connector;
8731 struct drm_connector_state *old_con_state, *new_con_state;
8732 struct drm_crtc_state *new_crtc_state;
8733 struct dm_crtc_state *new_dm_crtc_state;
8734 const struct dc_stream_status *status;
8737 /* Notify device removals. */
8738 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8739 if (old_con_state->crtc != new_con_state->crtc) {
8740 /* CRTC changes require notification. */
8744 if (!new_con_state->crtc)
8747 new_crtc_state = drm_atomic_get_new_crtc_state(
8748 state, new_con_state->crtc);
8750 if (!new_crtc_state)
8753 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8757 aconnector = to_amdgpu_dm_connector(connector);
8759 mutex_lock(&adev->dm.audio_lock);
8760 inst = aconnector->audio_inst;
8761 aconnector->audio_inst = -1;
8762 mutex_unlock(&adev->dm.audio_lock);
8764 amdgpu_dm_audio_eld_notify(adev, inst);
8767 /* Notify audio device additions. */
8768 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8769 if (!new_con_state->crtc)
8772 new_crtc_state = drm_atomic_get_new_crtc_state(
8773 state, new_con_state->crtc);
8775 if (!new_crtc_state)
8778 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8781 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8782 if (!new_dm_crtc_state->stream)
8785 status = dc_stream_get_status(new_dm_crtc_state->stream);
8789 aconnector = to_amdgpu_dm_connector(connector);
8791 mutex_lock(&adev->dm.audio_lock);
8792 inst = status->audio_inst;
8793 aconnector->audio_inst = inst;
8794 mutex_unlock(&adev->dm.audio_lock);
8796 amdgpu_dm_audio_eld_notify(adev, inst);
8801 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8802 * @crtc_state: the DRM CRTC state
8803 * @stream_state: the DC stream state.
8805 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8806 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8808 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8809 struct dc_stream_state *stream_state)
8811 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8815 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8816 * @state: The atomic state to commit
8818 * This will tell DC to commit the constructed DC state from atomic_check,
8819 * programming the hardware. Any failures here imply a hardware failure, since
8820 * atomic check should have filtered anything non-kosher.
8822 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8824 struct drm_device *dev = state->dev;
8825 struct amdgpu_device *adev = drm_to_adev(dev);
8826 struct amdgpu_display_manager *dm = &adev->dm;
8827 struct dm_atomic_state *dm_state;
8828 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8830 struct drm_crtc *crtc;
8831 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8832 unsigned long flags;
8833 bool wait_for_vblank = true;
8834 struct drm_connector *connector;
8835 struct drm_connector_state *old_con_state, *new_con_state;
8836 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8837 int crtc_disable_count = 0;
8838 bool mode_set_reset_required = false;
8840 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8842 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8844 dm_state = dm_atomic_get_new_state(state);
8845 if (dm_state && dm_state->context) {
8846 dc_state = dm_state->context;
8848 /* No state changes, retain current state. */
8849 dc_state_temp = dc_create_state(dm->dc);
8850 ASSERT(dc_state_temp);
8851 dc_state = dc_state_temp;
8852 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8855 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8856 new_crtc_state, i) {
8857 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8859 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8861 if (old_crtc_state->active &&
8862 (!new_crtc_state->active ||
8863 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8864 manage_dm_interrupts(adev, acrtc, false);
8865 dc_stream_release(dm_old_crtc_state->stream);
8869 drm_atomic_helper_calc_timestamping_constants(state);
8871 /* update changed items */
8872 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8873 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8875 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8876 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8879 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8880 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8881 "connectors_changed:%d\n",
8883 new_crtc_state->enable,
8884 new_crtc_state->active,
8885 new_crtc_state->planes_changed,
8886 new_crtc_state->mode_changed,
8887 new_crtc_state->active_changed,
8888 new_crtc_state->connectors_changed);
8890 /* Disable cursor if disabling crtc */
8891 if (old_crtc_state->active && !new_crtc_state->active) {
8892 struct dc_cursor_position position;
8894 memset(&position, 0, sizeof(position));
8895 mutex_lock(&dm->dc_lock);
8896 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8897 mutex_unlock(&dm->dc_lock);
8900 /* Copy all transient state flags into dc state */
8901 if (dm_new_crtc_state->stream) {
8902 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8903 dm_new_crtc_state->stream);
8906 /* handles headless hotplug case, updating new_state and
8907 * aconnector as needed
8910 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8912 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8914 if (!dm_new_crtc_state->stream) {
8916 * This could happen because of issues with
8917 * userspace notification delivery.
8918 * In this case userspace tries to set a mode on a
8919 * display which is in fact disconnected.
8920 * dc_sink is NULL on the aconnector in this case.
8921 * We expect a mode reset to come soon.
8923 * This can also happen when an unplug is done
8924 * during the resume sequence.
8926 * In this case, we want to pretend we still
8927 * have a sink to keep the pipe running so that
8928 * hw state is consistent with the sw state
8930 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8931 __func__, acrtc->base.base.id);
8935 if (dm_old_crtc_state->stream)
8936 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8938 pm_runtime_get_noresume(dev->dev);
8940 acrtc->enabled = true;
8941 acrtc->hw_mode = new_crtc_state->mode;
8942 crtc->hwmode = new_crtc_state->mode;
8943 mode_set_reset_required = true;
8944 } else if (modereset_required(new_crtc_state)) {
8945 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8946 /* i.e. reset mode */
8947 if (dm_old_crtc_state->stream)
8948 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8950 mode_set_reset_required = true;
8952 } /* for_each_crtc_in_state() */
8955 /* If there was a mode set or reset, disable eDP PSR */
8956 if (mode_set_reset_required)
8957 amdgpu_dm_psr_disable_all(dm);
8959 dm_enable_per_frame_crtc_master_sync(dc_state);
8960 mutex_lock(&dm->dc_lock);
8961 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8962 #if defined(CONFIG_DRM_AMD_DC_DCN)
8963 /* Allow idle optimization when vblank count is 0 for display off */
8964 if (dm->active_vblank_irq_count == 0)
8965 dc_allow_idle_optimizations(dm->dc, true);
8967 mutex_unlock(&dm->dc_lock);
8970 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8971 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8973 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8975 if (dm_new_crtc_state->stream != NULL) {
8976 const struct dc_stream_status *status =
8977 dc_stream_get_status(dm_new_crtc_state->stream);
8980 status = dc_stream_get_status_from_state(dc_state,
8981 dm_new_crtc_state->stream);
8983 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8985 acrtc->otg_inst = status->primary_otg_inst;
8988 #ifdef CONFIG_DRM_AMD_DC_HDCP
8989 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8990 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8991 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8992 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8994 new_crtc_state = NULL;
8997 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8999 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9001 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9002 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9003 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9004 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9005 dm_new_con_state->update_hdcp = true;
9009 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9010 hdcp_update_display(
9011 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9012 new_con_state->hdcp_content_type,
9013 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9017 /* Handle connector state changes */
9018 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9019 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9020 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9021 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9022 struct dc_surface_update dummy_updates[MAX_SURFACES];
9023 struct dc_stream_update stream_update;
9024 struct dc_info_packet hdr_packet;
9025 struct dc_stream_status *status = NULL;
9026 bool abm_changed, hdr_changed, scaling_changed;
9028 memset(&dummy_updates, 0, sizeof(dummy_updates));
9029 memset(&stream_update, 0, sizeof(stream_update));
9032 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9033 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9036 /* Skip any modesets/resets */
9037 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9040 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9041 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9043 scaling_changed = is_scaling_state_different(dm_new_con_state,
9046 abm_changed = dm_new_crtc_state->abm_level !=
9047 dm_old_crtc_state->abm_level;
9050 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9052 if (!scaling_changed && !abm_changed && !hdr_changed)
9055 stream_update.stream = dm_new_crtc_state->stream;
9056 if (scaling_changed) {
9057 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9058 dm_new_con_state, dm_new_crtc_state->stream);
9060 stream_update.src = dm_new_crtc_state->stream->src;
9061 stream_update.dst = dm_new_crtc_state->stream->dst;
9065 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9067 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9071 fill_hdr_info_packet(new_con_state, &hdr_packet);
9072 stream_update.hdr_static_metadata = &hdr_packet;
9075 status = dc_stream_get_status(dm_new_crtc_state->stream);
9077 if (WARN_ON(!status))
9080 WARN_ON(!status->plane_count);
9083 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9084 * Here we create an empty update on each plane.
9085 * To fix this, DC should permit updating only stream properties.
9087 for (j = 0; j < status->plane_count; j++)
9088 dummy_updates[j].surface = status->plane_states[0];
9091 mutex_lock(&dm->dc_lock);
9092 dc_commit_updates_for_stream(dm->dc,
9094 status->plane_count,
9095 dm_new_crtc_state->stream,
9098 mutex_unlock(&dm->dc_lock);
9101 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9102 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9103 new_crtc_state, i) {
9104 if (old_crtc_state->active && !new_crtc_state->active)
9105 crtc_disable_count++;
9107 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9108 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9110 /* For freesync config update on crtc state and params for irq */
9111 update_stream_irq_parameters(dm, dm_new_crtc_state);
9113 /* Handle vrr on->off / off->on transitions */
9114 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9119 * Enable interrupts for CRTCs that are newly enabled or went through
9120 * a modeset. It was intentionally deferred until after the front end
9121 * state was modified to wait until the OTG was on and so the IRQ
9122 * handlers didn't access stale or invalid state.
9124 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9125 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9126 #ifdef CONFIG_DEBUG_FS
9127 bool configure_crc = false;
9128 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9129 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9130 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9132 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9133 cur_crc_src = acrtc->dm_irq_params.crc_src;
9134 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9136 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9138 if (new_crtc_state->active &&
9139 (!old_crtc_state->active ||
9140 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9141 dc_stream_retain(dm_new_crtc_state->stream);
9142 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9143 manage_dm_interrupts(adev, acrtc, true);
9145 #ifdef CONFIG_DEBUG_FS
9147 * Frontend may have changed so reapply the CRC capture
9148 * settings for the stream.
9150 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9152 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9153 configure_crc = true;
9154 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9155 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9156 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9157 acrtc->dm_irq_params.crc_window.update_win = true;
9158 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9159 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9160 crc_rd_wrk->crtc = crtc;
9161 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9162 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9168 if (amdgpu_dm_crtc_configure_crc_source(
9169 crtc, dm_new_crtc_state, cur_crc_src))
9170 DRM_DEBUG_DRIVER("Failed to configure crc source");
9175 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9176 if (new_crtc_state->async_flip)
9177 wait_for_vblank = false;
9179 /* update planes when needed per crtc*/
9180 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9181 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9183 if (dm_new_crtc_state->stream)
9184 amdgpu_dm_commit_planes(state, dc_state, dev,
9185 dm, crtc, wait_for_vblank);
9188 /* Update audio instances for each connector. */
9189 amdgpu_dm_commit_audio(dev, state);
9191 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9192 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9193 /* restore the backlight level */
9194 if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
9195 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9198 * send vblank event on all events not handled in flip and
9199 * mark consumed event for drm_atomic_helper_commit_hw_done
9201 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9202 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9204 if (new_crtc_state->event)
9205 drm_send_event_locked(dev, &new_crtc_state->event->base);
9207 new_crtc_state->event = NULL;
9209 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9211 /* Signal HW programming completion */
9212 drm_atomic_helper_commit_hw_done(state);
9214 if (wait_for_vblank)
9215 drm_atomic_helper_wait_for_flip_done(dev, state);
9217 drm_atomic_helper_cleanup_planes(dev, state);
9219 /* return the stolen vga memory back to VRAM */
9220 if (!adev->mman.keep_stolen_vga_memory)
9221 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9222 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9225 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9226 * so we can put the GPU into runtime suspend if we're not driving any
9229 for (i = 0; i < crtc_disable_count; i++)
9230 pm_runtime_put_autosuspend(dev->dev);
9231 pm_runtime_mark_last_busy(dev->dev);
9234 dc_release_state(dc_state_temp);
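/*
 * Build and commit a minimal atomic state (connector, CRTC and primary
 * plane) with mode_changed forced, to restore the previous display
 * configuration without waiting for a userspace modeset.
 */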
9238 static int dm_force_atomic_commit(struct drm_connector *connector)
9241 struct drm_device *ddev = connector->dev;
9242 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9243 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9244 struct drm_plane *plane = disconnected_acrtc->base.primary;
9245 struct drm_connector_state *conn_state;
9246 struct drm_crtc_state *crtc_state;
9247 struct drm_plane_state *plane_state;
9252 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9254 /* Construct an atomic state to restore previous display setting */
9257 * Attach connectors to drm_atomic_state
9259 conn_state = drm_atomic_get_connector_state(state, connector);
9261 ret = PTR_ERR_OR_ZERO(conn_state);
9265 /* Attach crtc to drm_atomic_state*/
9266 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9268 ret = PTR_ERR_OR_ZERO(crtc_state);
9272 /* force a restore */
9273 crtc_state->mode_changed = true;
9275 /* Attach plane to drm_atomic_state */
9276 plane_state = drm_atomic_get_plane_state(state, plane);
9278 ret = PTR_ERR_OR_ZERO(plane_state);
9282 /* Call commit internally with the state we just constructed */
9283 ret = drm_atomic_commit(state);
9286 drm_atomic_state_put(state);
9288 DRM_ERROR("Restoring old state failed with %i\n", ret);
9294 * This function handles all cases when set mode does not come upon hotplug.
9295 * This includes when a display is unplugged then plugged back into the
9296 * same port and when running without usermode desktop manager support
9298 void dm_restore_drm_connector_state(struct drm_device *dev,
9299 struct drm_connector *connector)
9301 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9302 struct amdgpu_crtc *disconnected_acrtc;
9303 struct dm_crtc_state *acrtc_state;
9305 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9308 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9309 if (!disconnected_acrtc)
9312 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9313 if (!acrtc_state->stream)
9317 * If the previous sink is not released and different from the current,
9318 * we deduce we are in a state where we cannot rely on a usermode call
9319 * to turn on the display, so we do it here
9321 if (acrtc_state->stream->sink != aconnector->dc_sink)
9322 dm_force_atomic_commit(&aconnector->base);
9326 * Grabs all modesetting locks to serialize against any blocking commits
9327 * and waits for completion of all non-blocking commits.
9329 static int do_aquire_global_lock(struct drm_device *dev,
9330 struct drm_atomic_state *state)
9332 struct drm_crtc *crtc;
9333 struct drm_crtc_commit *commit;
9337 * Adding all modeset locks to acquire_ctx will
9338 * ensure that when the framework releases it, the
9339 * extra locks we are locking here will get released too.
9341 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9345 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9346 spin_lock(&crtc->commit_lock);
9347 commit = list_first_entry_or_null(&crtc->commit_list,
9348 struct drm_crtc_commit, commit_entry);
9350 drm_crtc_commit_get(commit);
9351 spin_unlock(&crtc->commit_lock);
9357 * Make sure all pending HW programming completed and
9360 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9363 ret = wait_for_completion_interruptible_timeout(
9364 &commit->flip_done, 10*HZ);
9367 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9368 "timed out\n", crtc->base.id, crtc->name);
9370 drm_crtc_commit_put(commit);
9373 return ret < 0 ? ret : 0;
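/*
 * Derive the FreeSync configuration for a CRTC from the connector's
 * min/max refresh range and the requested mode, choosing between fixed,
 * variable and inactive VRR state.
 */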
9376 static void get_freesync_config_for_crtc(
9377 struct dm_crtc_state *new_crtc_state,
9378 struct dm_connector_state *new_con_state)
9380 struct mod_freesync_config config = {0};
9381 struct amdgpu_dm_connector *aconnector =
9382 to_amdgpu_dm_connector(new_con_state->base.connector);
9383 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9384 int vrefresh = drm_mode_vrefresh(mode);
9385 bool fs_vid_mode = false;
9387 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9388 vrefresh >= aconnector->min_vfreq &&
9389 vrefresh <= aconnector->max_vfreq;
9391 if (new_crtc_state->vrr_supported) {
9392 new_crtc_state->stream->ignore_msa_timing_param = true;
9393 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9395 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9396 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9397 config.vsif_supported = true;
9401 config.state = VRR_STATE_ACTIVE_FIXED;
9402 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9404 } else if (new_crtc_state->base.vrr_enabled) {
9405 config.state = VRR_STATE_ACTIVE_VARIABLE;
9407 config.state = VRR_STATE_INACTIVE;
9411 new_crtc_state->freesync_config = config;
9414 static void reset_freesync_config_for_crtc(
9415 struct dm_crtc_state *new_crtc_state)
9417 new_crtc_state->vrr_supported = false;
9419 memset(&new_crtc_state->vrr_infopacket, 0,
9420 sizeof(new_crtc_state->vrr_infopacket));
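/*
 * Treat two modes as equivalent for FreeSync purposes when they differ
 * only in vertical blanking (vtotal and vsync position move while the
 * sync width stays the same), i.e. a front-porch-only change as used by
 * FreeSync video modes.
 */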
9424 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9425 struct drm_crtc_state *new_crtc_state)
9427 struct drm_display_mode old_mode, new_mode;
9429 if (!old_crtc_state || !new_crtc_state)
9432 old_mode = old_crtc_state->mode;
9433 new_mode = new_crtc_state->mode;
9435 if (old_mode.clock == new_mode.clock &&
9436 old_mode.hdisplay == new_mode.hdisplay &&
9437 old_mode.vdisplay == new_mode.vdisplay &&
9438 old_mode.htotal == new_mode.htotal &&
9439 old_mode.vtotal != new_mode.vtotal &&
9440 old_mode.hsync_start == new_mode.hsync_start &&
9441 old_mode.vsync_start != new_mode.vsync_start &&
9442 old_mode.hsync_end == new_mode.hsync_end &&
9443 old_mode.vsync_end != new_mode.vsync_end &&
9444 old_mode.hskew == new_mode.hskew &&
9445 old_mode.vscan == new_mode.vscan &&
9446 (old_mode.vsync_end - old_mode.vsync_start) ==
9447 (new_mode.vsync_end - new_mode.vsync_start))
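/*
 * Pin the FreeSync state to a fixed refresh rate computed from the mode:
 * refresh_in_uhz = pixel_clock_in_hz * 1000000 / (htotal * vtotal).
 * For example, 1920x1080 with a 148.5 MHz pixel clock and 2200x1125
 * total timing gives 60000000 uHz, i.e. 60 Hz.
 */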
9453 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9454 uint64_t num, den, res;
9455 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9457 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9459 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9460 den = (unsigned long long)new_crtc_state->mode.htotal *
9461 (unsigned long long)new_crtc_state->mode.vtotal;
9463 res = div_u64(num, den);
9464 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
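/*
 * Validate and apply CRTC changes during atomic check: create and
 * validate a new DC stream for the sink, remove the stream for disabled
 * or reset CRTCs, add it for enabled ones, and refresh scaling, ABM,
 * color management and FreeSync settings on the stream.
 */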
9467 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9468 struct drm_atomic_state *state,
9469 struct drm_crtc *crtc,
9470 struct drm_crtc_state *old_crtc_state,
9471 struct drm_crtc_state *new_crtc_state,
9473 bool *lock_and_validation_needed)
9475 struct dm_atomic_state *dm_state = NULL;
9476 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9477 struct dc_stream_state *new_stream;
9481 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9482 * update changed items
9484 struct amdgpu_crtc *acrtc = NULL;
9485 struct amdgpu_dm_connector *aconnector = NULL;
9486 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9487 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9491 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9492 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9493 acrtc = to_amdgpu_crtc(crtc);
9494 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9496 /* TODO This hack should go away */
9497 if (aconnector && enable) {
9498 /* Make sure fake sink is created in plug-in scenario */
9499 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9501 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9504 if (IS_ERR(drm_new_conn_state)) {
9505 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9509 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9510 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9512 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9515 new_stream = create_validate_stream_for_sink(aconnector,
9516 &new_crtc_state->mode,
9518 dm_old_crtc_state->stream);
9521 * we can have no stream on ACTION_SET if a display
9522 * was disconnected during S3, in this case it is not an
9523 * error, the OS will be updated after detection, and
9524 * will do the right thing on next atomic commit
9528 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9529 __func__, acrtc->base.base.id);
9535 * TODO: Check VSDB bits to decide whether this should
9536 * be enabled or not.
9538 new_stream->triggered_crtc_reset.enabled =
9539 dm->force_timing_sync;
9541 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9543 ret = fill_hdr_info_packet(drm_new_conn_state,
9544 &new_stream->hdr_static_metadata);
9549 * If we already removed the old stream from the context
9550 * (and set the new stream to NULL) then we can't reuse
9551 * the old stream even if the stream and scaling are unchanged.
9552 * We'll hit the BUG_ON and black screen.
9554 * TODO: Refactor this function to allow this check to work
9555 * in all conditions.
9557 if (amdgpu_freesync_vid_mode &&
9558 dm_new_crtc_state->stream &&
9559 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9562 if (dm_new_crtc_state->stream &&
9563 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9564 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9565 new_crtc_state->mode_changed = false;
9566 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9567 new_crtc_state->mode_changed);
9571 /* mode_changed flag may get updated above, need to check again */
9572 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9576 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9577 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9578 "connectors_changed:%d\n",
9580 new_crtc_state->enable,
9581 new_crtc_state->active,
9582 new_crtc_state->planes_changed,
9583 new_crtc_state->mode_changed,
9584 new_crtc_state->active_changed,
9585 new_crtc_state->connectors_changed);
9587 /* Remove stream for any changed/disabled CRTC */
9590 if (!dm_old_crtc_state->stream)
9593 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9594 is_timing_unchanged_for_freesync(new_crtc_state,
9596 new_crtc_state->mode_changed = false;
9598 "Mode change not required for front porch change, "
9599 "setting mode_changed to %d",
9600 new_crtc_state->mode_changed);
9602 set_freesync_fixed_config(dm_new_crtc_state);
9605 } else if (amdgpu_freesync_vid_mode && aconnector &&
9606 is_freesync_video_mode(&new_crtc_state->mode,
9608 set_freesync_fixed_config(dm_new_crtc_state);
9611 ret = dm_atomic_get_state(state, &dm_state);
9615 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9618 /* i.e. reset mode */
9619 if (dc_remove_stream_from_ctx(
9622 dm_old_crtc_state->stream) != DC_OK) {
9627 dc_stream_release(dm_old_crtc_state->stream);
9628 dm_new_crtc_state->stream = NULL;
9630 reset_freesync_config_for_crtc(dm_new_crtc_state);
9632 *lock_and_validation_needed = true;
9634 } else {/* Add stream for any updated/enabled CRTC */
9636 * Quick fix to prevent a NULL pointer dereference on new_stream when
9637 * added MST connectors are not found in the existing crtc_state in chained mode.
9638 * TODO: need to dig out the root cause of that
9640 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9643 if (modereset_required(new_crtc_state))
9646 if (modeset_required(new_crtc_state, new_stream,
9647 dm_old_crtc_state->stream)) {
9649 WARN_ON(dm_new_crtc_state->stream);
9651 ret = dm_atomic_get_state(state, &dm_state);
9655 dm_new_crtc_state->stream = new_stream;
9657 dc_stream_retain(new_stream);
9659 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9662 if (dc_add_stream_to_ctx(
9665 dm_new_crtc_state->stream) != DC_OK) {
9670 *lock_and_validation_needed = true;
9675 /* Release extra reference */
9677 dc_stream_release(new_stream);
9680 * We want to do dc stream updates that do not require a
9681 * full modeset below.
9683 if (!(enable && aconnector && new_crtc_state->active))
9686 * Given above conditions, the dc state cannot be NULL because:
9687 * 1. We're in the process of enabling CRTCs (just been added
9688 * to the dc context, or already is on the context)
9689 * 2. Has a valid connector attached, and
9690 * 3. Is currently active and enabled.
9691 * => The dc stream state currently exists.
9693 BUG_ON(dm_new_crtc_state->stream == NULL);
9695 /* Scaling or underscan settings */
9696 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9697 drm_atomic_crtc_needs_modeset(new_crtc_state))
9698 update_stream_scaling_settings(
9699 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9702 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9705 * Color management settings. We also update color properties
9706 * when a modeset is needed, to ensure it gets reprogrammed.
9708 if (dm_new_crtc_state->base.color_mgmt_changed ||
9709 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9710 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9715 /* Update Freesync settings. */
9716 get_freesync_config_for_crtc(dm_new_crtc_state,
9723 dc_stream_release(new_stream);
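/*
 * Decide whether a plane change forces the plane (and, given current DC
 * constraints, all planes on the CRTC) to be removed and re-added:
 * modesets, color management changes and any z-order, scaling, rotation,
 * blending, format, tiling or DCC change all trigger a reset.
 */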
9727 static bool should_reset_plane(struct drm_atomic_state *state,
9728 struct drm_plane *plane,
9729 struct drm_plane_state *old_plane_state,
9730 struct drm_plane_state *new_plane_state)
9732 struct drm_plane *other;
9733 struct drm_plane_state *old_other_state, *new_other_state;
9734 struct drm_crtc_state *new_crtc_state;
9738 * TODO: Remove this hack once the checks below are sufficient
9739 * to determine when we need to reset all the planes on
9742 if (state->allow_modeset)
9745 /* Exit early if we know that we're adding or removing the plane. */
9746 if (old_plane_state->crtc != new_plane_state->crtc)
9749 /* old crtc == new_crtc == NULL, plane not in context. */
9750 if (!new_plane_state->crtc)
9754 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9756 if (!new_crtc_state)
9759 /* CRTC Degamma changes currently require us to recreate planes. */
9760 if (new_crtc_state->color_mgmt_changed)
9763 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9767 * If there are any new primary or overlay planes being added or
9768 * removed then the z-order can potentially change. To ensure
9769 * correct z-order and pipe acquisition the current DC architecture
9770 * requires us to remove and recreate all existing planes.
9772 * TODO: Come up with a more elegant solution for this.
9774 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9775 struct amdgpu_framebuffer *old_afb, *new_afb;
9776 if (other->type == DRM_PLANE_TYPE_CURSOR)
9779 if (old_other_state->crtc != new_plane_state->crtc &&
9780 new_other_state->crtc != new_plane_state->crtc)
9783 if (old_other_state->crtc != new_other_state->crtc)
9786 /* Src/dst size and scaling updates. */
9787 if (old_other_state->src_w != new_other_state->src_w ||
9788 old_other_state->src_h != new_other_state->src_h ||
9789 old_other_state->crtc_w != new_other_state->crtc_w ||
9790 old_other_state->crtc_h != new_other_state->crtc_h)
9793 /* Rotation / mirroring updates. */
9794 if (old_other_state->rotation != new_other_state->rotation)
9797 /* Blending updates. */
9798 if (old_other_state->pixel_blend_mode !=
9799 new_other_state->pixel_blend_mode)
9802 /* Alpha updates. */
9803 if (old_other_state->alpha != new_other_state->alpha)
9806 /* Colorspace changes. */
9807 if (old_other_state->color_range != new_other_state->color_range ||
9808 old_other_state->color_encoding != new_other_state->color_encoding)
9811 /* Framebuffer checks fall at the end. */
9812 if (!old_other_state->fb || !new_other_state->fb)
9815 /* Pixel format changes can require bandwidth updates. */
9816 if (old_other_state->fb->format != new_other_state->fb->format)
9819 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9820 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9822 /* Tiling and DCC changes also require bandwidth updates. */
9823 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9824 old_afb->base.modifier != new_afb->base.modifier)
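/*
 * Validate a cursor framebuffer: reject oversized or cropped cursors,
 * require the FB width to match its pitch in pixels, and require a
 * linear layout when no format modifier is supplied.
 */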
9831 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9832 struct drm_plane_state *new_plane_state,
9833 struct drm_framebuffer *fb)
9835 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9836 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9840 if (fb->width > new_acrtc->max_cursor_width ||
9841 fb->height > new_acrtc->max_cursor_height) {
9842 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9843 new_plane_state->fb->width,
9844 new_plane_state->fb->height);
9847 if (new_plane_state->src_w != fb->width << 16 ||
9848 new_plane_state->src_h != fb->height << 16) {
9849 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9853 /* Pitch in pixels */
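/*
 * e.g. a tightly packed 64-pixel-wide ARGB8888 cursor FB has
 * pitches[0] = 256 bytes and cpp[0] = 4, giving a pitch of 64 pixels.
 */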
9854 pitch = fb->pitches[0] / fb->format->cpp[0];
9856 if (fb->width != pitch) {
9857 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9866 /* FB pitch is supported by cursor plane */
9869 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9873 /* Core DRM takes care of checking FB modifiers, so we only need to
9874 * check tiling flags when the FB doesn't have a modifier. */
9875 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9876 if (adev->family < AMDGPU_FAMILY_AI) {
9877 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9878 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9879 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9881 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9884 DRM_DEBUG_ATOMIC("Cursor FB not linear");
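/*
 * Validate and apply plane changes during atomic check: enforce the
 * cursor plane restrictions, release DC plane state for removed planes
 * and create and attach DC plane state for added ones, flagging when
 * full lock-and-validation is required.
 */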
9892 static int dm_update_plane_state(struct dc *dc,
9893 struct drm_atomic_state *state,
9894 struct drm_plane *plane,
9895 struct drm_plane_state *old_plane_state,
9896 struct drm_plane_state *new_plane_state,
9898 bool *lock_and_validation_needed)
9901 struct dm_atomic_state *dm_state = NULL;
9902 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9903 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9904 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9905 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9906 struct amdgpu_crtc *new_acrtc;
9911 new_plane_crtc = new_plane_state->crtc;
9912 old_plane_crtc = old_plane_state->crtc;
9913 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9914 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9916 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9917 if (!enable || !new_plane_crtc ||
9918 drm_atomic_plane_disabling(plane->state, new_plane_state))
9921 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9923 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9924 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9928 if (new_plane_state->fb) {
9929 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9930 new_plane_state->fb);
9938 needs_reset = should_reset_plane(state, plane, old_plane_state,
9941 /* Remove any changed/removed planes */
9946 if (!old_plane_crtc)
9949 old_crtc_state = drm_atomic_get_old_crtc_state(
9950 state, old_plane_crtc);
9951 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9953 if (!dm_old_crtc_state->stream)
9956 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9957 plane->base.id, old_plane_crtc->base.id);
9959 ret = dm_atomic_get_state(state, &dm_state);
9963 if (!dc_remove_plane_from_context(
9965 dm_old_crtc_state->stream,
9966 dm_old_plane_state->dc_state,
9967 dm_state->context)) {
9973 dc_plane_state_release(dm_old_plane_state->dc_state);
9974 dm_new_plane_state->dc_state = NULL;
9976 *lock_and_validation_needed = true;
9978 } else { /* Add new planes */
9979 struct dc_plane_state *dc_new_plane_state;
9981 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9984 if (!new_plane_crtc)
9987 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9988 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9990 if (!dm_new_crtc_state->stream)
9996 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10000 WARN_ON(dm_new_plane_state->dc_state);
10002 dc_new_plane_state = dc_create_plane_state(dc);
10003 if (!dc_new_plane_state)
10006 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10007 plane->base.id, new_plane_crtc->base.id);
10009 ret = fill_dc_plane_attributes(
10010 drm_to_adev(new_plane_crtc->dev),
10011 dc_new_plane_state,
10015 dc_plane_state_release(dc_new_plane_state);
10019 ret = dm_atomic_get_state(state, &dm_state);
10021 dc_plane_state_release(dc_new_plane_state);
10026 * Any atomic check errors that occur after this will
10027 * not need a release. The plane state will be attached
10028 * to the stream, and therefore part of the atomic
10029 * state. It'll be released when the atomic state is
10032 if (!dc_add_plane_to_context(
10034 dm_new_crtc_state->stream,
10035 dc_new_plane_state,
10036 dm_state->context)) {
10038 dc_plane_state_release(dc_new_plane_state);
10042 dm_new_plane_state->dc_state = dc_new_plane_state;
10044 /* Tell DC to do a full surface update every time there
10045 * is a plane change. Inefficient, but works for now.
10047 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10049 *lock_and_validation_needed = true;
10056 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10057 struct drm_crtc *crtc,
10058 struct drm_crtc_state *new_crtc_state)
10060 struct drm_plane_state *new_cursor_state, *new_primary_state;
10061 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10063 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10064 * cursor per pipe but it's going to inherit the scaling and
10065 * positioning from the underlying pipe. Check that the cursor plane's
10066 * blending properties match the primary plane's. */
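/*
 * Example: a 64x64 cursor FB scanned out at 64x64 has a scale of 1000
 * (1:1 in 0.001 units); if the primary plane uses a different scale the
 * cursor would be rendered at the wrong size, so the state is rejected.
 */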
10068 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10069 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10070 if (!new_cursor_state || !new_primary_state ||
10071 !new_cursor_state->fb || !new_primary_state->fb) {
10075 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10076 (new_cursor_state->src_w >> 16);
10077 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10078 (new_cursor_state->src_h >> 16);
10080 primary_scale_w = new_primary_state->crtc_w * 1000 /
10081 (new_primary_state->src_w >> 16);
10082 primary_scale_h = new_primary_state->crtc_h * 1000 /
10083 (new_primary_state->src_h >> 16);
10085 if (cursor_scale_w != primary_scale_w ||
10086 cursor_scale_h != primary_scale_h) {
10087 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10094 #if defined(CONFIG_DRM_AMD_DC_DCN)
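/*
 * Add every CRTC driven through this connector's MST topology to the
 * atomic state so that DSC configuration can be revalidated across the
 * whole topology when one of its CRTCs needs a modeset.
 */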
10095 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10097 struct drm_connector *connector;
10098 struct drm_connector_state *conn_state;
10099 struct amdgpu_dm_connector *aconnector = NULL;
10101 for_each_new_connector_in_state(state, connector, conn_state, i) {
10102 if (conn_state->crtc != crtc)
10105 aconnector = to_amdgpu_dm_connector(connector);
10106 if (!aconnector->port || !aconnector->mst_port)
10115 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
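/*
 * With the shared hardware cursor, an enabled overlay plane must fully
 * cover the primary plane; commits where it does not are rejected by the
 * bounds check below.
 */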
10119 static int validate_overlay(struct drm_atomic_state *state)
10122 struct drm_plane *plane;
10123 struct drm_plane_state *new_plane_state;
10124 struct drm_plane_state *primary_state, *overlay_state = NULL;
10126 /* Check if primary plane is contained inside overlay */
10127 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10128 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10129 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10132 overlay_state = new_plane_state;
10137 /* check if we're making changes to the overlay plane */
10138 if (!overlay_state)
10141 /* check if overlay plane is enabled */
10142 if (!overlay_state->crtc)
10145 /* find the primary plane for the CRTC that the overlay is enabled on */
10146 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10147 if (IS_ERR(primary_state))
10148 return PTR_ERR(primary_state);
10150 /* check if primary plane is enabled */
10151 if (!primary_state->crtc)
10154 /* Perform the bounds check to ensure the overlay plane covers the primary */
10155 if (primary_state->crtc_x < overlay_state->crtc_x ||
10156 primary_state->crtc_y < overlay_state->crtc_y ||
10157 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10158 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10159 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state;

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!old_con_state->crtc && !new_con_state->crtc)
			continue;

		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    dm_old_crtc_state->dsc_force_changed == false)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret)
			goto fail;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	ret = validate_overlay(state);
	if (ret)
		goto fail;

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret)
			goto fail;
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/* TODO Removed scaling changes validation due to inability to commit
	 * new stream into context without causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * otherwise.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
			goto fail;

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and their commit
		 * work can be done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
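				/* Example: with 3 private objects and the DM
				 * object at i == 0, j == 2; object 2 is moved
				 * into slot 0 and num_private_objs becomes 2,
				 * dropping the DM object from the state.
				 */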
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

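/*
 * DP_MSA_TIMING_PAR_IGNORED (bit 6 of the DOWN_STREAM_PORT_COUNT DPCD
 * register at 0x0007) advertises a sink that can ignore the MSA timing
 * parameters and follow the actual video timing instead, which is the
 * prerequisite for variable refresh (FreeSync) over DP/eDP.
 */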
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

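/*
 * Hand the CEA extension block to the DC EDID parser (DMCU firmware) in
 * 8-byte chunks, waiting for an ack after each chunk; once the whole block
 * has been sent, ask the firmware whether it found an AMD VSDB and, if so,
 * fill @vsdb_info with the advertised FreeSync refresh-rate range.
 */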
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc *dc = adev->dm.dc;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block send completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

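/*
 * Locate the CEA extension block in @edid and ask the DC EDID parser to
 * scan it for an AMD VSDB. Returns the index of the extension block that
 * was parsed when FreeSync information is found, or -ENODEV otherwise.
 */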
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

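/*
 * Derive the FreeSync/VRR capability of @connector from @edid: for DP/eDP
 * sinks that can ignore MSA timing, the EDID monitor range descriptor
 * supplies the min/max refresh rates; for HDMI sinks the AMD VSDB parsed
 * by the DC firmware is used instead. The result is cached in the DM
 * connector state and exposed through the "vrr_capable" property.
 */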
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		return;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;

	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid)
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);

		if (edid_check_required == true && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

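/*
 * Register access helpers handed to DC: they wrap cgs register I/O and emit
 * the amdgpu_dc_wreg/amdgpu_dc_rreg tracepoints so register traffic can be
 * traced per dc_context.
 */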
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

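/*
 * Reads are refused while the DMUB register helper is gathering an offloaded
 * register sequence (unless burst writes are allowed), since a direct MMIO
 * read at that point would not reflect the writes still queued for DMUB.
 */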
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

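/*
 * Synchronous wrapper around the DMUB AUX path: the request is posted to the
 * DMUB firmware asynchronously and the caller then waits (up to 10 s) for the
 * dmub_aux_transfer_done completion, which is signalled once the DMUB AUX
 * reply notification has been copied into adev->dm.dmub_notify.
 */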
int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
				struct aux_payload *payload, enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		return -1;
	}
	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;

	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;

		/* For the read case, copy the reply data into the payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
	}

	return adev->dm.dmub_notify->aux_reply.length;