2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
52 #include "amdgpu_pm.h"
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
62 #include "ivsrcid/ivsrcid_vislands30.h"
64 #include "i2caux_interface.h"
65 #include <linux/module.h>
66 #include <linux/moduleparam.h>
67 #include <linux/types.h>
68 #include <linux/pm_runtime.h>
69 #include <linux/pci.h>
70 #include <linux/firmware.h>
71 #include <linux/component.h>
73 #include <drm/drm_atomic.h>
74 #include <drm/drm_atomic_uapi.h>
75 #include <drm/drm_atomic_helper.h>
76 #include <drm/drm_dp_mst_helper.h>
77 #include <drm/drm_fb_helper.h>
78 #include <drm/drm_fourcc.h>
79 #include <drm/drm_edid.h>
80 #include <drm/drm_vblank.h>
81 #include <drm/drm_audio_component.h>
83 #if defined(CONFIG_DRM_AMD_DC_DCN)
84 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 #include "dcn/dcn_1_0_offset.h"
87 #include "dcn/dcn_1_0_sh_mask.h"
88 #include "soc15_hw_ip.h"
89 #include "vega10_ip_offset.h"
91 #include "soc15_common.h"
94 #include "modules/inc/mod_freesync.h"
95 #include "modules/power/power_helpers.h"
96 #include "modules/inc/mod_info_packet.h"
/* Per-ASIC DMUB/DMCU firmware blobs. MODULE_FIRMWARE() records each path in
 * the module metadata so userspace tooling (e.g. initramfs builders) can
 * bundle the firmware with the module.
 */
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
128 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
129 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
130 * requests into DC requests, and DC responses into DRM responses.
132 * The root control structure is &struct amdgpu_display_manager.
135 /* basic init/fini API */
136 static int amdgpu_dm_init(struct amdgpu_device *adev);
137 static void amdgpu_dm_fini(struct amdgpu_device *adev);
138 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
/*
 * Map a DC dongle type (from the link's DPCD caps) to the DRM subconnector
 * enum exposed via the DisplayPort "subconnector" property.
 */
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
		/* Mismatched HDMI dongle cannot be classified more precisely. */
		return DRM_MODE_SUBCONNECTOR_Unknown;
/*
 * Refresh the DRM dp_subconnector_property for @aconnector from the dongle
 * type of the attached sink (Unknown when no sink is present).
 */
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	/* The subconnector property only exists on DisplayPort connectors. */
	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
177 * initializes drm_device display related structures, based on the information
178 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
179 * drm_encoder, drm_mode_config
181 * Returns 0 on success
183 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
184 /* removes and deallocates the drm structures, created by the above function */
185 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
187 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
188 struct drm_plane *plane,
189 unsigned long possible_crtcs,
190 const struct dc_plane_cap *plane_cap);
191 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
192 struct drm_plane *plane,
193 uint32_t link_index);
194 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
195 struct amdgpu_dm_connector *amdgpu_dm_connector,
197 struct amdgpu_encoder *amdgpu_encoder);
198 static int amdgpu_dm_encoder_init(struct drm_device *dev,
199 struct amdgpu_encoder *aencoder,
200 uint32_t link_index);
202 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
204 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
206 static int amdgpu_dm_atomic_check(struct drm_device *dev,
207 struct drm_atomic_state *state);
209 static void handle_cursor_update(struct drm_plane *plane,
210 struct drm_plane_state *old_plane_state);
212 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
213 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
214 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
215 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
216 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
218 static const struct drm_format_info *
219 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
	/* Reject out-of-range CRTC indices. */
	if (crtc >= adev->mode_info.num_crtc)
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",

		/* DC tracks the vblank counter per stream. */
		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
/*
 * Read the current scanout position for @crtc from DC and pack it into the
 * legacy register format: *position = v | (h << 16),
 * *vbl = v_blank_start | (v_blank_end << 16).
 */
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
/* amd_ip_funcs callbacks for the DM IP block.
 * NOTE(review): bodies appear to be trivial stubs (DM keeps no idle or
 * soft-reset state of its own) — confirm against the amd_ip_funcs table.
 */
static bool dm_is_idle(void *handle)

static int dm_wait_for_idle(void *handle)

static bool dm_check_soft_reset(void *handle)

static int dm_soft_reset(void *handle)
/*
 * Find the amdgpu_crtc whose OTG (output timing generator) instance matches
 * @otg_inst. Falls back to CRTC 0 — with a warning — when otg_inst is -1.
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	/* Linear scan over all registered CRTCs; the list is small. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
/*
 * True when VRR is active (variable or fixed refresh) for @acrtc.
 * This _irq variant reads the IRQ-owned copy of the freesync config
 * (dm_irq_params) rather than the atomic CRTC state.
 */
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
	return acrtc->dm_irq_params.freesync_config.state ==
	       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
	       VRR_STATE_ACTIVE_FIXED;
/* True when the atomic CRTC state has VRR active (variable or fixed). */
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
/*
 * Decide whether DC needs a vmin/vmax timing adjustment when transitioning
 * from @old_state to @new_state (fixed-rate VRR, or a VRR on/off change).
 */
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common_irq_params carrying the amdgpu device and the
 *                    pageflip IRQ source (the previous "@interrupt_params:
 *                    ignored" note was wrong — it is dereferenced below)
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;

	/* The IRQ source encodes the OTG instance as an offset from the pflip base. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");

	/* event_lock protects pflip_status and the pending vblank event. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);

		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: common_irq_params carrying the amdgpu device and the
 *                    VUPDATE IRQ source
 *
 * Records the measured refresh rate for tracing, and in VRR mode performs
 * core vblank handling (deferred here until after end of front-porch) plus
 * BTR processing for pre-DCE12 (pre-AI family) ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
	drm_dev = acrtc->base.dev;
	vblank = &drm_dev->vblank[acrtc->base.index];
	previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
	/* Frame duration = delta between this and the previous vblank timestamp. */
	frame_duration_ns = vblank->time - previous_timestamp;

	if (frame_duration_ns > 0) {
		trace_amdgpu_refresh_rate_track(acrtc->base.index,
			ktime_divns(NSEC_PER_SEC, frame_duration_ns));
		atomic64_set(&irq_params->previous_timestamp, vblank->time);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",

	/* Core vblank handling is done here after end of front-porch in
	 * vrr mode, as vblank timestamping will give valid results
	 * while now done after front-porch. This will also deliver
	 * page-flip completion events that have been queued to us
	 * if a pageflip happened inside front-porch.
	 */
		drm_crtc_handle_vblank(&acrtc->base);

		/* BTR processing for pre-DCE12 ASICs */
		if (acrtc->dm_irq_params.stream &&
		    adev->family < AMDGPU_FAMILY_AI) {
			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
			mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc->dm_irq_params.stream,
				&acrtc->dm_irq_params.vrr_params);

			dc_stream_adjust_vmin_vmax(
				acrtc->dm_irq_params.stream,
				&acrtc->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);

			drm_crtc_vblank_put(&acrtc->base);

		acrtc->pflip_status = AMDGPU_FLIP_NONE;

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	/* Delegate CRC-window handling for secure display. */
	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox Interrupt
 * event handler.
 */
#define DMUB_TRACE_MAX_READ 64
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			/* Drain all pending notifications; only the last one survives. */
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			} while (notify.pending_notification);

			if (adev->dm.dmub_notify)
				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
				complete(&adev->dm.dmub_aux_transfer_done);
			// TODO : HPD Implementation

			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");

	/* Drain the DMUB outbox0 trace buffer, bounded by DMUB_TRACE_MAX_READ. */
	if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
		trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
						 entry.param0, entry.param1);

		DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
/* amd_ip_funcs clock/power-gating callbacks. DM does not manage gating via
 * this interface. NOTE(review): bodies appear to be stubs returning 0 —
 * confirm against the full file.
 */
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
689 /* Prototypes of private functions */
690 static int dm_early_init(void* handle);
/* Allocate memory for FBC compressed data */
/*
 * amdgpu_dm_fbc_init() - Size the FBC buffer from the largest mode on
 * @connector (4 bytes per pixel) and pin it in GTT. Only applies when DC has
 * an FBC compressor and the link is eDP; no-op if already allocated.
 */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)

	if (compressor->bo_ptr)

	/* Find the largest mode so the buffer covers the worst case. */
	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;

		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

			DRM_ERROR("DM: Failed to initialize FBC\n");

			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
/*
 * drm_audio_component op: copy the ELD (EDID-Like Data) of the connector
 * whose audio instance matches @port into @buf for the HDA driver.
 */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;

	/* audio_lock serializes ELD reads against hotplug notifications. */
	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)

		/* Copy at most max_bytes; ret still reports the full ELD size. */
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
/* Component-bind callback: hand the HDA audio component our ops table and
 * remember the component for later ELD notifications.
 */
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;

	adev->dm.audio_component = acomp;
/* Component-unbind callback: drop our reference to the audio component. */
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	adev->dm.audio_component = NULL;

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind = amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
/*
 * Register the DRM audio component and initialize per-pin audio state from
 * the DC resource pool (one pin per audio endpoint).
 */
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	/* -1 / false / 0 mark each pin as "unknown / not connected" initially. */
	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);

	adev->dm.audio_registered = true;
/* Unregister the audio component (if registered) and mark audio disabled. */
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
	if (!adev->mode_info.audio.enabled)

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
/* Forward an ELD-changed notification for @pin to the bound HDA component. */
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
	struct drm_audio_component *acomp = adev->dm.audio_component;

	/* Only notify when a component is bound and implements the hook. */
	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
/*
 * dm_dmub_hw_init() - Copy the DMUB firmware sections into their reserved
 * framebuffer windows and bring the DMUB hardware/service up.
 *
 * Copies inst_const/bss/vbios into the FB windows (inst_const only when the
 * PSP front door did not already load it), zeroes the mailbox, trace buffer
 * and fw-state windows, programs hardware parameters, waits for firmware
 * auto-load, initializes DMCU/ABM when present, and lazily creates the
 * DC-side DMUB service wrapper.
 *
 * Return: 0 on success; logs an error and bails out early on failure.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;

		/* DMUB isn't supported on the ASIC. */

		DRM_ERROR("No framebuffer info for DMUB service.\n");

		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	/* PSP header/footer bytes wrap inst_const and are not copied. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* Lazily create the DC-side DMUB service wrapper. */
	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * mmhub_read_system_context() - Derive the physical address space layout
 * (system aperture, AGP window, FB base/offset/top, GART page-table range)
 * from GMC state and pack it into @pa_config for DC on DCN ASICs.
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	/* Aperture bounds are expressed in 256KB units (>> 18). */
	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	/* AGP window is expressed in 16MB units (>> 24). */
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* GART page-table addresses split into 4-bit high / 32-bit low parts. */
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * event_mall_stutter() - Deferred vblank work: maintain the count of CRTCs
 * with vblank IRQs enabled and allow DC idle (MALL/stutter) optimizations
 * only while that count is zero.
 */
static void event_mall_stutter(struct work_struct *work)
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	/* dc_lock serializes this DC call with the rest of the driver. */
	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if(dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
/*
 * vblank_create_workqueue() - Allocate one vblank_workqueue slot per DC link
 * and wire each slot's mall_work to event_mall_stutter().
 */
static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
/*
 * amdgpu_dm_init() - Create and initialize the DM (Display Manager) device.
 *
 * Builds dc_init_data from the adev, creates the CGS device and the DC
 * core, applies feature/debug mask overrides, initializes DMUB hardware,
 * freesync, color management, vblank/HDCP workqueues, DMUB notifications,
 * and finally the DRM software state (connectors, encoders, vblank).
 * On any failure the elided error paths fall through to amdgpu_dm_fini()
 * (see line 1288). NOTE(review): many lines are elided in this listing;
 * code is reproduced verbatim.
 */
1089 static int amdgpu_dm_init(struct amdgpu_device *adev)
1091 struct dc_init_data init_data;
1092 #ifdef CONFIG_DRM_AMD_DC_HDCP
1093 struct dc_callback_init init_params;
1097 adev->dm.ddev = adev_to_drm(adev);
1098 adev->dm.adev = adev;
1100 /* Zero all the fields */
1101 memset(&init_data, 0, sizeof(init_data));
1102 #ifdef CONFIG_DRM_AMD_DC_HDCP
1103 memset(&init_params, 0, sizeof(init_params));
1106 mutex_init(&adev->dm.dc_lock);
1107 mutex_init(&adev->dm.audio_lock);
1108 #if defined(CONFIG_DRM_AMD_DC_DCN)
1109 spin_lock_init(&adev->dm.vblank_lock);
1112 if(amdgpu_dm_irq_init(adev)) {
1113 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
/* Populate the ASIC identity DC needs to pick the right code paths. */
1117 init_data.asic_id.chip_family = adev->family;
1119 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1120 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1122 init_data.asic_id.vram_width = adev->gmc.vram_width;
1123 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1124 init_data.asic_id.atombios_base_address =
1125 adev->mode_info.atom_context->bios;
1127 init_data.driver = adev;
1129 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1131 if (!adev->dm.cgs_device) {
1132 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1136 init_data.cgs_device = adev->dm.cgs_device;
1138 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
/* Per-ASIC flags (case labels elided in this listing). */
1140 switch (adev->asic_type) {
1145 init_data.flags.gpu_vm_support = true;
1146 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1147 init_data.flags.disable_dmcu = true;
1149 #if defined(CONFIG_DRM_AMD_DC_DCN)
1151 init_data.flags.gpu_vm_support = true;
/* Translate module-parameter feature masks into DC init flags. */
1158 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1159 init_data.flags.fbc_support = true;
1161 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1162 init_data.flags.multi_mon_pp_mclk_switch = true;
1164 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1165 init_data.flags.disable_fractional_pwm = true;
1167 init_data.flags.power_down_display_on_boot = true;
1169 INIT_LIST_HEAD(&adev->dm.da_list);
1170 /* Display Core create. */
1171 adev->dm.dc = dc_create(&init_data);
1174 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1176 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
/* Debug-mask overrides applied on top of the freshly created DC. */
1180 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1181 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1182 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1185 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1186 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1188 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1189 adev->dm.dc->debug.disable_stutter = true;
1191 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1192 adev->dm.dc->debug.disable_dsc = true;
1194 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1195 adev->dm.dc->debug.disable_clock_gate = true;
1197 r = dm_dmub_hw_init(adev);
1199 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1203 dc_hardware_init(adev->dm.dc);
1205 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* APUs share system memory: describe the physical address space to DC. */
1206 if (adev->apu_flags) {
1207 struct dc_phy_addr_space_config pa_config;
1209 mmhub_read_system_context(adev, &pa_config);
1211 // Call the DC init_memory func
1212 dc_setup_system_context(adev->dm.dc, &pa_config);
1216 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1217 if (!adev->dm.freesync_module) {
1219 "amdgpu: failed to initialize freesync_module.\n");
1221 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1222 adev->dm.freesync_module);
1224 amdgpu_dm_init_color_mod();
1226 #if defined(CONFIG_DRM_AMD_DC_DCN)
1227 if (adev->dm.dc->caps.max_links > 0) {
1228 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1230 if (!adev->dm.vblank_workqueue)
1231 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1233 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1237 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* HDCP is only supported on RAVEN and newer ASICs with links. */
1238 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1239 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1241 if (!adev->dm.hdcp_workqueue)
1242 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1244 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1246 dc_init_callbacks(adev->dm.dc, &init_params);
1249 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1250 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
/* DMUB AUX notifications: allocate the notify buffer and set up outbox. */
1252 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1253 init_completion(&adev->dm.dmub_aux_transfer_done);
1254 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1255 if (!adev->dm.dmub_notify) {
1256 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1259 amdgpu_dm_outbox_init(adev);
1262 if (amdgpu_dm_initialize_drm_device(adev)) {
1264 "amdgpu: failed to initialize sw for display support.\n");
1268 /* create fake encoders for MST */
1269 dm_dp_create_fake_mst_encoders(adev);
1271 /* TODO: Add_display_info? */
1273 /* TODO use dynamic cursor width */
1274 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1275 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1277 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1279 "amdgpu: failed to initialize sw for display support.\n");
1284 DRM_DEBUG_DRIVER("KMS initialized.\n");
/* Shared error path (label elided): tear down whatever was built. */
1288 amdgpu_dm_fini(adev);
/*
 * Early teardown IP hook: only audio is torn down here; the rest of DM
 * teardown happens later in amdgpu_dm_fini().
 */
1293 static int amdgpu_dm_early_fini(void *handle)
1295 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1297 amdgpu_dm_audio_fini(adev);
/*
 * amdgpu_dm_fini() - Tear down everything amdgpu_dm_init() created,
 * in roughly reverse order: MST encoders, DRM device state, optional
 * workqueues (secure display CRC, HDCP, vblank), DMUB service, DC core,
 * CGS device, freesync, and finally the DM mutexes. All pointers are
 * NULLed after free so a second call is harmless.
 * NOTE(review): lines are elided in this listing; code is verbatim.
 */
1302 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1306 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1307 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1310 amdgpu_dm_destroy_drm_device(&adev->dm);
1312 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Flush pending TA notify work before freeing the CRC work struct. */
1313 if (adev->dm.crc_rd_wrk) {
1314 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1315 kfree(adev->dm.crc_rd_wrk);
1316 adev->dm.crc_rd_wrk = NULL;
1319 #ifdef CONFIG_DRM_AMD_DC_HDCP
1320 if (adev->dm.hdcp_workqueue) {
1321 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1322 adev->dm.hdcp_workqueue = NULL;
1326 dc_deinit_callbacks(adev->dm.dc);
1329 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Clear the back-pointer before freeing so stale work can't deref it. */
1330 if (adev->dm.vblank_workqueue) {
1331 adev->dm.vblank_workqueue->dm = NULL;
1332 kfree(adev->dm.vblank_workqueue);
1333 adev->dm.vblank_workqueue = NULL;
1337 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1339 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1340 kfree(adev->dm.dmub_notify);
1341 adev->dm.dmub_notify = NULL;
1344 if (adev->dm.dmub_bo)
1345 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1346 &adev->dm.dmub_bo_gpu_addr,
1347 &adev->dm.dmub_bo_cpu_addr);
1349 /* DC Destroy TODO: Replace destroy DAL */
1351 dc_destroy(&adev->dm.dc);
1353 * TODO: pageflip, vlank interrupt
1355 * amdgpu_dm_irq_fini(adev);
1358 if (adev->dm.cgs_device) {
1359 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1360 adev->dm.cgs_device = NULL;
1362 if (adev->dm.freesync_module) {
1363 mod_freesync_destroy(adev->dm.freesync_module);
1364 adev->dm.freesync_module = NULL;
1367 mutex_destroy(&adev->dm.audio_lock);
1368 mutex_destroy(&adev->dm.dc_lock);
/*
 * load_dmcu_fw() - Request and validate the DMCU firmware for the ASIC,
 * then register its ERAM and INTV sections with the PSP ucode loader.
 * DMCU firmware is optional: a missing file is not an error (fw_dmcu is
 * left NULL), and ASICs without PSP loading skip it entirely.
 * NOTE(review): many case labels/returns are elided in this listing.
 */
1373 static int load_dmcu_fw(struct amdgpu_device *adev)
1375 const char *fw_name_dmcu = NULL;
1377 const struct dmcu_firmware_header_v1_0 *hdr;
/* Pick the firmware file per ASIC; most ASICs have no DMCU firmware. */
1379 switch(adev->asic_type) {
1380 #if defined(CONFIG_DRM_AMD_DC_SI)
1395 case CHIP_POLARIS11:
1396 case CHIP_POLARIS10:
1397 case CHIP_POLARIS12:
1405 case CHIP_SIENNA_CICHLID:
1406 case CHIP_NAVY_FLOUNDER:
1407 case CHIP_DIMGREY_CAVEFISH:
1408 case CHIP_BEIGE_GOBY:
1412 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1415 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1416 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1417 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1418 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1423 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
/* DMCU firmware is only consumed via PSP loading. */
1427 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1428 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1432 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1434 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1435 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1436 adev->dm.fw_dmcu = NULL;
1440 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1445 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1447 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1449 release_firmware(adev->dm.fw_dmcu);
1450 adev->dm.fw_dmcu = NULL;
/* Register ERAM (code minus interrupt vectors) and INTV sections,
 * each page-aligned, with the PSP ucode table. */
1454 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1455 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1456 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1457 adev->firmware.fw_size +=
1458 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1460 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1461 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1462 adev->firmware.fw_size +=
1463 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1465 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1467 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
/* DMUB service register-read callback: forwards to DC's register I/O. */
1472 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1474 struct amdgpu_device *adev = ctx;
1476 return dm_read_reg(adev->dm.dc->ctx, address);
/* DMUB service register-write callback: forwards to DC's register I/O. */
1479 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1482 struct amdgpu_device *adev = ctx;
1484 return dm_write_reg(adev->dm.dc->ctx, address, value);
/*
 * dm_dmub_sw_init() - Software-side setup of the DMUB (Display
 * MicroController Unit B) service: pick and load the per-ASIC firmware,
 * create the dmub_srv instance, compute its memory regions, allocate a
 * VRAM buffer for them, and build the framebuffer region info.
 * ASICs without DMUB return early (see line 1530).
 * NOTE(review): several lines contain extraction mojibake ("®ion_" was
 * "&region_" in the original) and error paths are elided; code is
 * reproduced verbatim.
 */
1487 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1489 struct dmub_srv_create_params create_params;
1490 struct dmub_srv_region_params region_params;
1491 struct dmub_srv_region_info region_info;
1492 struct dmub_srv_fb_params fb_params;
1493 struct dmub_srv_fb_info *fb_info;
1494 struct dmub_srv *dmub_srv;
1495 const struct dmcub_firmware_header_v1_0 *hdr;
1496 const char *fw_name_dmub;
1497 enum dmub_asic dmub_asic;
1498 enum dmub_status status;
/* Map ASIC type -> DMUB ASIC enum + firmware filename. */
1501 switch (adev->asic_type) {
1503 dmub_asic = DMUB_ASIC_DCN21;
1504 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1505 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1506 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1508 case CHIP_SIENNA_CICHLID:
1509 dmub_asic = DMUB_ASIC_DCN30;
1510 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1512 case CHIP_NAVY_FLOUNDER:
1513 dmub_asic = DMUB_ASIC_DCN30;
1514 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1517 dmub_asic = DMUB_ASIC_DCN301;
1518 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1520 case CHIP_DIMGREY_CAVEFISH:
1521 dmub_asic = DMUB_ASIC_DCN302;
1522 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1524 case CHIP_BEIGE_GOBY:
1525 dmub_asic = DMUB_ASIC_DCN303;
1526 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1530 /* ASIC doesn't support DMUB. */
1534 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1536 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1540 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1542 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1546 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
/* When PSP loads firmware, register DMCUB in the ucode table too. */
1548 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1549 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1550 AMDGPU_UCODE_ID_DMCUB;
1551 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1553 adev->firmware.fw_size +=
1554 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1556 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1557 adev->dm.dmcub_fw_version);
1560 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1562 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1563 dmub_srv = adev->dm.dmub_srv;
1566 DRM_ERROR("Failed to allocate DMUB service!\n");
/* Register read/write callbacks route DMUB accesses through DC. */
1570 memset(&create_params, 0, sizeof(create_params));
1571 create_params.user_ctx = adev;
1572 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1573 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1574 create_params.asic = dmub_asic;
1576 /* Create the DMUB service. */
1577 status = dmub_srv_create(dmub_srv, &create_params);
1578 if (status != DMUB_STATUS_OK) {
1579 DRM_ERROR("Error creating DMUB service: %d\n", status);
1583 /* Calculate the size of all the regions for the DMUB service. */
1584 memset(®ion_params, 0, sizeof(region_params));
/* inst_const excludes the PSP header/footer wrapped around the image. */
1586 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1587 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1588 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1589 region_params.vbios_size = adev->bios_size;
1590 region_params.fw_bss_data = region_params.bss_data_size ?
1591 adev->dm.dmub_fw->data +
1592 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1593 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1594 region_params.fw_inst_const =
1595 adev->dm.dmub_fw->data +
1596 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1599 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1602 if (status != DMUB_STATUS_OK) {
1603 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1608 * Allocate a framebuffer based on the total size of all the regions.
1609 * TODO: Move this into GART.
1611 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1612 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1613 &adev->dm.dmub_bo_gpu_addr,
1614 &adev->dm.dmub_bo_cpu_addr);
1618 /* Rebase the regions on the framebuffer address. */
1619 memset(&fb_params, 0, sizeof(fb_params));
1620 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1621 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1622 fb_params.region_info = ®ion_info;
1624 adev->dm.dmub_fb_info =
1625 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1626 fb_info = adev->dm.dmub_fb_info;
1630 "Failed to allocate framebuffer info for DMUB service!\n");
1634 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1635 if (status != DMUB_STATUS_OK) {
1636 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
/*
 * IP sw_init hook: set up the DMUB service, then load the (optional)
 * DMCU firmware; the elided lines presumably propagate the DMUB error.
 */
1643 static int dm_sw_init(void *handle)
1645 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1648 r = dm_dmub_sw_init(adev);
1652 return load_dmcu_fw(adev);
/*
 * IP sw_fini hook: undo dm_sw_init — free the DMUB fb info and service,
 * then release both firmware blobs. release_firmware(NULL) is a no-op,
 * so unconditional release of fw_dmcu is safe.
 */
1655 static int dm_sw_fini(void *handle)
1657 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1659 kfree(adev->dm.dmub_fb_info);
1660 adev->dm.dmub_fb_info = NULL;
1662 if (adev->dm.dmub_srv) {
1663 dmub_srv_destroy(adev->dm.dmub_srv);
1664 adev->dm.dmub_srv = NULL;
1667 release_firmware(adev->dm.dmub_fw);
1668 adev->dm.dmub_fw = NULL;
1670 release_firmware(adev->dm.fw_dmcu);
1671 adev->dm.fw_dmcu = NULL;
/*
 * Walk all connectors and start MST topology management on every link
 * detected as an MST branch. On failure the link is downgraded to a
 * single (non-MST) connection.
 */
1676 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1678 struct amdgpu_dm_connector *aconnector;
1679 struct drm_connector *connector;
1680 struct drm_connector_list_iter iter;
1683 drm_connector_list_iter_begin(dev, &iter);
1684 drm_for_each_connector_iter(connector, &iter) {
1685 aconnector = to_amdgpu_dm_connector(connector);
1686 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1687 aconnector->mst_mgr.aux) {
1688 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1690 aconnector->base.base.id);
1692 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1694 DRM_ERROR("DM_MST: Failed to start MST\n");
/* Fall back to treating the link as a plain single connection. */
1695 aconnector->dc_link->type =
1696 dc_connection_single;
1701 drm_connector_list_iter_end(&iter);
/*
 * IP late_init hook: program the ABM (Adaptive Backlight Management)
 * IRAM with a linear backlight LUT and ramping parameters — via DMCU
 * when present, otherwise via DMUB per eDP link — then kick off MST
 * link detection on all connectors.
 */
1706 static int dm_late_init(void *handle)
1708 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710 struct dmcu_iram_parameters params;
1711 unsigned int linear_lut[16];
1713 struct dmcu *dmcu = NULL;
1715 dmcu = adev->dm.dc->res_pool->dmcu;
/* 16-entry linear LUT over the full 0..0xFFFF backlight range. */
1717 for (i = 0; i < 16; i++)
1718 linear_lut[i] = 0xFFFF * i / 15;
1721 params.backlight_ramping_start = 0xCCCC;
1722 params.backlight_ramping_reduction = 0xCCCCCCCC;
1723 params.backlight_lut_array_size = 16;
1724 params.backlight_lut_array = linear_lut;
1726 /* Min backlight level after ABM reduction, Don't allow below 1%
1727 * 0xFFFF x 0.01 = 0x28F
1729 params.min_abm_backlight = 0x28F;
1730 /* In the case where abm is implemented on dmcub,
1731 * dmcu object will be null.
1732 * ABM 2.4 and up are implemented on dmcub.
1735 if (!dmcu_load_iram(dmcu, params))
1737 } else if (adev->dm.dc->ctx->dmub_srv) {
1738 struct dc_link *edp_links[MAX_NUM_EDP];
/* DMUB path: configure ABM separately for each eDP panel. */
1741 get_edp_links(adev->dm.dc, edp_links, &edp_num);
1742 for (i = 0; i < edp_num; i++) {
1743 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1748 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
/*
 * Suspend/resume helper for MST topologies: on suspend, quiesce each
 * MST manager; on resume, try to resume it and, if the topology is
 * gone, disable MST and request a hotplug event so userspace
 * re-probes. MST ports (downstream connectors) are skipped — only the
 * branch device's manager is handled.
 */
1751 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1753 struct amdgpu_dm_connector *aconnector;
1754 struct drm_connector *connector;
1755 struct drm_connector_list_iter iter;
1756 struct drm_dp_mst_topology_mgr *mgr;
1758 bool need_hotplug = false;
1760 drm_connector_list_iter_begin(dev, &iter);
1761 drm_for_each_connector_iter(connector, &iter) {
1762 aconnector = to_amdgpu_dm_connector(connector);
1763 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1764 aconnector->mst_port)
1767 mgr = &aconnector->mst_mgr;
1770 drm_dp_mst_topology_mgr_suspend(mgr);
1772 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
/* Resume failed: topology changed/vanished — tear MST down. */
1774 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1775 need_hotplug = true;
1779 drm_connector_list_iter_end(&iter);
1782 drm_kms_helper_hotplug_event(dev);
/*
 * Push DCN watermark clock settings to the SMU on resume. Only needed
 * for Navi1x (see the big comment below); other ASICs and non-sw-SMU
 * paths return early. Returns 0 on success (elided return paths).
 */
1785 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1787 struct smu_context *smu = &adev->smu;
1790 if (!is_support_sw_smu(adev))
1793 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1794 * on window driver dc implementation.
1795 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1796 * should be passed to smu during boot up and resume from s3.
1797 * boot up: dc calculate dcn watermark clock settings within dc_create,
1798 * dcn20_resource_construct
1799 * then call pplib functions below to pass the settings to smu:
1800 * smu_set_watermarks_for_clock_ranges
1801 * smu_set_watermarks_table
1802 * navi10_set_watermarks_table
1803 * smu_write_watermarks_table
1805 * For Renoir, clock settings of dcn watermark are also fixed values.
1806 * dc has implemented different flow for window driver:
1807 * dc_hardware_init / dc_set_power_state
1812 * smu_set_watermarks_for_clock_ranges
1813 * renoir_set_watermarks_table
1814 * smu_write_watermarks_table
1817 * dc_hardware_init -> amdgpu_dm_init
1818 * dc_set_power_state --> dm_resume
1820 * therefore, this function apply to navi10/12/14 but not Renoir
1823 switch(adev->asic_type) {
1832 ret = smu_write_watermarks_table(smu);
1834 DRM_ERROR("Failed to update WMTABLE!\n");
1842 * dm_hw_init() - Initialize DC device
1843 * @handle: The base driver device containing the amdgpu_dm device.
1845 * Initialize the &struct amdgpu_display_manager device. This involves calling
1846 * the initializers of each DM component, then populating the struct with them.
1848 * Although the function implies hardware initialization, both hardware and
1849 * software are initialized here. Splitting them out to their relevant init
1850 * hooks is a future TODO item.
1852 * Some notable things that are initialized here:
1854 * - Display Core, both software and hardware
1855 * - DC modules that we need (freesync and color management)
1856 * - DRM software states
1857 * - Interrupt sources and handlers
1859 * - Debug FS entries, if enabled
1861 static int dm_hw_init(void *handle)
1863 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1864 /* Create DAL display manager */
1865 amdgpu_dm_init(adev);
/* Enable hotplug-detect interrupts once DM is fully constructed. */
1866 amdgpu_dm_hpd_init(adev);
1872 * dm_hw_fini() - Teardown DC device
1873 * @handle: The base driver device containing the amdgpu_dm device.
1875 * Teardown components within &struct amdgpu_display_manager that require
1876 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1877 * were loaded. Also flush IRQ workqueues and disable them.
1879 static int dm_hw_fini(void *handle)
1881 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* Reverse order of dm_hw_init: HPD first, then IRQs, then DM itself. */
1883 amdgpu_dm_hpd_fini(adev);
1885 amdgpu_dm_irq_fini(adev);
1886 amdgpu_dm_fini(adev);
1891 static int dm_enable_vblank(struct drm_crtc *crtc);
1892 static void dm_disable_vblank(struct drm_crtc *crtc);
/*
 * During GPU reset, enable/disable pageflip and vblank interrupts for
 * every CRTC that has an active stream with planes in @state. Used to
 * quiesce IRQs before reset and restore them afterwards.
 */
1894 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1895 struct dc_state *state, bool enable)
1897 enum dc_irq_source irq_source;
1898 struct amdgpu_crtc *acrtc;
1902 for (i = 0; i < state->stream_count; i++) {
1903 acrtc = get_crtc_by_otg_inst(
1904 adev, state->stream_status[i].primary_otg_inst);
/* Only streams that actually drive planes need their IRQs toggled. */
1906 if (acrtc && state->stream_status[i].plane_count != 0) {
1907 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1908 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1909 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1910 acrtc->crtc_id, enable ? "en" : "dis", rc);
1912 DRM_WARN("Failed to %s pflip interrupts\n",
1913 enable ? "enable" : "disable");
1916 rc = dm_enable_vblank(&acrtc->base);
1918 DRM_WARN("Failed to enable vblank interrupts\n");
1920 dm_disable_vblank(&acrtc->base);
/*
 * Commit an empty DC state: copy the current state, strip every plane
 * and stream from it, validate, and commit — effectively blanking all
 * outputs. Used on suspend during GPU reset. Returns the dc_status of
 * the failing step, DC_ERROR_UNEXPECTED if nothing ran.
 */
1928 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1930 struct dc_state *context = NULL;
1931 enum dc_status res = DC_ERROR_UNEXPECTED;
1933 struct dc_stream_state *del_streams[MAX_PIPES];
1934 int del_streams_count = 0;
1936 memset(del_streams, 0, sizeof(del_streams));
1938 context = dc_create_state(dc);
1939 if (context == NULL)
1940 goto context_alloc_fail;
1942 dc_resource_state_copy_construct_current(dc, context);
1944 /* First remove from context all streams */
1945 for (i = 0; i < context->stream_count; i++) {
1946 struct dc_stream_state *stream = context->streams[i];
1948 del_streams[del_streams_count++] = stream;
1951 /* Remove all planes for removed streams and then remove the streams */
1952 for (i = 0; i < del_streams_count; i++) {
1953 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1954 res = DC_FAIL_DETACH_SURFACES;
1958 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1964 res = dc_validate_global_state(dc, context, false);
1967 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1971 res = dc_commit_state(dc, context);
/* Shared exit path: drop our reference on the temporary state. */
1974 dc_release_state(context);
/*
 * IP suspend hook. Two paths:
 *  - GPU reset: cache the current DC state, disable IRQs for it, commit
 *    zero streams, and suspend DM IRQs (dc_lock held; unlock elided).
 *  - Normal S3: save the atomic state, suspend MST and DM IRQs, then
 *    put DC into D3.
 */
1980 static int dm_suspend(void *handle)
1982 struct amdgpu_device *adev = handle;
1983 struct amdgpu_display_manager *dm = &adev->dm;
1986 if (amdgpu_in_reset(adev)) {
1987 mutex_lock(&dm->dc_lock);
1989 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Disallow idle optimizations while the GPU is being reset. */
1990 dc_allow_idle_optimizations(adev->dm.dc, false);
1993 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1995 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1997 amdgpu_dm_commit_zero_streams(dm->dc);
1999 amdgpu_dm_irq_suspend(adev);
/* Normal suspend: cached_state must not already hold a saved state. */
2004 WARN_ON(adev->dm.cached_state);
2005 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2007 s3_handle_mst(adev_to_drm(adev), true);
2009 amdgpu_dm_irq_suspend(adev);
2011 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
/*
 * Scan the new connector states in @state and return the first
 * connector bound to @crtc, or NULL (elided) if none matches.
 */
2016 static struct amdgpu_dm_connector *
2017 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2018 struct drm_crtc *crtc)
2021 struct drm_connector_state *new_con_state;
2022 struct drm_connector *connector;
2023 struct drm_crtc *crtc_from_state;
2025 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2026 crtc_from_state = new_con_state->crtc;
2028 if (crtc_from_state == crtc)
2029 return to_amdgpu_dm_connector(connector);
/*
 * Emulate link detection for a forced connector (userspace forced the
 * connector on but no sink was physically detected): synthesize sink
 * capabilities from the connector signal type, create a dc_sink, attach
 * it as the link's local sink, and attempt an EDID read.
 */
2035 static void emulated_link_detect(struct dc_link *link)
2037 struct dc_sink_init_data sink_init_data = { 0 };
2038 struct display_sink_capability sink_caps = { 0 };
2039 enum dc_edid_status edid_status;
2040 struct dc_context *dc_ctx = link->ctx;
2041 struct dc_sink *sink = NULL;
2042 struct dc_sink *prev_sink = NULL;
2044 link->type = dc_connection_none;
/* Drop the reference to any previously attached sink. */
2045 prev_sink = link->local_sink;
2048 dc_sink_release(prev_sink);
2050 switch (link->connector_signal) {
2051 case SIGNAL_TYPE_HDMI_TYPE_A: {
2052 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2053 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2057 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2058 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2059 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2063 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2064 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2065 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2069 case SIGNAL_TYPE_LVDS: {
2070 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2071 sink_caps.signal = SIGNAL_TYPE_LVDS;
2075 case SIGNAL_TYPE_EDP: {
2076 sink_caps.transaction_type =
2077 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2078 sink_caps.signal = SIGNAL_TYPE_EDP;
/* NOTE(review): DP deliberately reports SIGNAL_TYPE_VIRTUAL here,
 * unlike the other cases which echo the connector signal — presumably
 * because no real DP link training can occur; confirm upstream. */
2082 case SIGNAL_TYPE_DISPLAY_PORT: {
2083 sink_caps.transaction_type =
2084 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2085 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2090 DC_ERROR("Invalid connector type! signal:%d\n",
2091 link->connector_signal);
2095 sink_init_data.link = link;
2096 sink_init_data.sink_signal = sink_caps.signal;
2098 sink = dc_sink_create(&sink_init_data);
2100 DC_ERROR("Failed to create sink!\n");
2104 /* dc_sink_create returns a new reference */
2105 link->local_sink = sink;
2107 edid_status = dm_helpers_read_local_edid(
2112 if (edid_status != EDID_OK)
2113 DC_ERROR("Failed to read EDID");
/*
 * After GPU reset, replay the cached DC state: for every stream, mark
 * each of its plane surfaces for a full update and commit them through
 * dc_commit_updates_for_stream. The update bundle is heap-allocated
 * because it is too large for the stack.
 * NOTE(review): loops index dc_state->stream_status without [k] — same
 * as upstream; the per-stream status indexing looks suspicious but is
 * preserved verbatim here.
 */
2117 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2118 struct amdgpu_display_manager *dm)
2121 struct dc_surface_update surface_updates[MAX_SURFACES];
2122 struct dc_plane_info plane_infos[MAX_SURFACES];
2123 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2124 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2125 struct dc_stream_update stream_update;
2129 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2132 dm_error("Failed to allocate update bundle\n");
2136 for (k = 0; k < dc_state->stream_count; k++) {
2137 bundle->stream_update.stream = dc_state->streams[k];
2139 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2140 bundle->surface_updates[m].surface =
2141 dc_state->stream_status->plane_states[m];
2142 bundle->surface_updates[m].surface->force_full_update =
2145 dc_commit_updates_for_stream(
2146 dm->dc, bundle->surface_updates,
2147 dc_state->stream_status->plane_count,
2148 dc_state->streams[k], &bundle->stream_update, dc_state);
/*
 * Turn off DPMS for the stream driving @link: find the stream attached
 * to the link and commit a stream update with dpms_off = true. Bails
 * out (with dc_lock released) if no stream is found.
 */
2157 static void dm_set_dpms_off(struct dc_link *link)
2159 struct dc_stream_state *stream_state;
2160 struct amdgpu_dm_connector *aconnector = link->priv;
2161 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2162 struct dc_stream_update stream_update;
2163 bool dpms_off = true;
2165 memset(&stream_update, 0, sizeof(stream_update));
2166 stream_update.dpms_off = &dpms_off;
2168 mutex_lock(&adev->dm.dc_lock);
2169 stream_state = dc_stream_find_from_link(link);
2171 if (stream_state == NULL) {
2172 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2173 mutex_unlock(&adev->dm.dc_lock);
2177 stream_update.stream = stream_state;
/* No surface updates — only the dpms_off flag changes. */
2178 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2179 stream_state, &stream_update,
2180 stream_state->ctx->dc->current_state);
2181 mutex_unlock(&adev->dm.dc_lock);
/*
 * IP resume hook. Two paths mirror dm_suspend():
 *  - GPU reset: re-init DMUB, power DC to D0, resume IRQs, and recommit
 *    the cached DC state with all streams/planes marked changed.
 *  - Normal S3: rebuild the private dc_state, re-init DMUB, power on,
 *    resume HPD/MST, re-detect every connector, then restore the saved
 *    atomic state (forcing a modeset and releasing stale DC objects).
 * NOTE(review): lines are elided in this listing; code is verbatim.
 */
2184 static int dm_resume(void *handle)
2186 struct amdgpu_device *adev = handle;
2187 struct drm_device *ddev = adev_to_drm(adev);
2188 struct amdgpu_display_manager *dm = &adev->dm;
2189 struct amdgpu_dm_connector *aconnector;
2190 struct drm_connector *connector;
2191 struct drm_connector_list_iter iter;
2192 struct drm_crtc *crtc;
2193 struct drm_crtc_state *new_crtc_state;
2194 struct dm_crtc_state *dm_new_crtc_state;
2195 struct drm_plane *plane;
2196 struct drm_plane_state *new_plane_state;
2197 struct dm_plane_state *dm_new_plane_state;
2198 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2199 enum dc_connection_type new_connection_type = dc_connection_none;
2200 struct dc_state *dc_state;
2203 if (amdgpu_in_reset(adev)) {
2204 dc_state = dm->cached_dc_state;
2206 r = dm_dmub_hw_init(adev);
2208 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2210 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2213 amdgpu_dm_irq_resume_early(adev);
/* Mark everything dirty so the commit reprograms all hardware. */
2215 for (i = 0; i < dc_state->stream_count; i++) {
2216 dc_state->streams[i]->mode_changed = true;
2217 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2218 dc_state->stream_status->plane_states[j]->update_flags.raw
2223 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2225 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2227 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2229 dc_release_state(dm->cached_dc_state);
2230 dm->cached_dc_state = NULL;
2232 amdgpu_dm_irq_resume_late(adev);
2234 mutex_unlock(&dm->dc_lock);
2238 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2239 dc_release_state(dm_state->context);
2240 dm_state->context = dc_create_state(dm->dc);
2241 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2242 dc_resource_state_construct(dm->dc, dm_state->context);
2244 /* Before powering on DC we need to re-initialize DMUB. */
2245 r = dm_dmub_hw_init(adev);
2247 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2249 /* power on hardware */
2250 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2252 /* program HPD filter */
2256 * early enable HPD Rx IRQ, should be done before set mode as short
2257 * pulse interrupts are used for MST
2259 amdgpu_dm_irq_resume_early(adev);
2261 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2262 s3_handle_mst(ddev, false);
/* Re-run sink detection on every (non-MST-port) connector. */
2265 drm_connector_list_iter_begin(ddev, &iter);
2266 drm_for_each_connector_iter(connector, &iter) {
2267 aconnector = to_amdgpu_dm_connector(connector);
2270 * this is the case when traversing through already created
2271 * MST connectors, should be skipped
2273 if (aconnector->mst_port)
2276 mutex_lock(&aconnector->hpd_lock);
2277 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2278 DRM_ERROR("KMS: Failed to detect connector\n");
2280 if (aconnector->base.force && new_connection_type == dc_connection_none)
2281 emulated_link_detect(aconnector->dc_link);
2283 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2285 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2286 aconnector->fake_enable = false;
2288 if (aconnector->dc_sink)
2289 dc_sink_release(aconnector->dc_sink);
2290 aconnector->dc_sink = NULL;
2291 amdgpu_dm_update_connector_after_detect(aconnector);
2292 mutex_unlock(&aconnector->hpd_lock);
2294 drm_connector_list_iter_end(&iter);
2296 /* Force mode set in atomic commit */
2297 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2298 new_crtc_state->active_changed = true;
2301 * atomic_check is expected to create the dc states. We need to release
2302 * them here, since they were duplicated as part of the suspend
2305 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2306 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2307 if (dm_new_crtc_state->stream) {
2308 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2309 dc_stream_release(dm_new_crtc_state->stream);
2310 dm_new_crtc_state->stream = NULL;
2314 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2315 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2316 if (dm_new_plane_state->dc_state) {
2317 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2318 dc_plane_state_release(dm_new_plane_state->dc_state);
2319 dm_new_plane_state->dc_state = NULL;
2323 drm_atomic_helper_resume(ddev, dm->cached_state);
2325 dm->cached_state = NULL;
2327 amdgpu_dm_irq_resume_late(adev);
2329 amdgpu_dm_smu_write_watermarks_table(adev);
2337 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2338 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2339 * the base driver's device list to be initialized and torn down accordingly.
2341 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
/* IP-block callbacks that register DM with the amdgpu base driver. */
2344 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2346 .early_init = dm_early_init,
2347 .late_init = dm_late_init,
2348 .sw_init = dm_sw_init,
2349 .sw_fini = dm_sw_fini,
2350 .early_fini = amdgpu_dm_early_fini,
2351 .hw_init = dm_hw_init,
2352 .hw_fini = dm_hw_fini,
2353 .suspend = dm_suspend,
2354 .resume = dm_resume,
2355 .is_idle = dm_is_idle,
2356 .wait_for_idle = dm_wait_for_idle,
2357 .check_soft_reset = dm_check_soft_reset,
2358 .soft_reset = dm_soft_reset,
2359 .set_clockgating_state = dm_set_clockgating_state,
2360 .set_powergating_state = dm_set_powergating_state,
/*
 * Exported IP-block descriptor; the amdgpu base driver adds this to its
 * device IP list as the display (DCE) block.
 * NOTE(review): the version fields between .type and .funcs are not visible
 * in this extract — confirm against the full source.
 */
2363 const struct amdgpu_ip_block_version dm_ip_block =
2365 .type = AMD_IP_BLOCK_TYPE_DCE,
2369 .funcs = &amdgpu_dm_funcs,
/*
 * DRM mode-config callbacks for the DM KMS driver: framebuffer creation,
 * per-format info lookup, output-poll notification and the atomic
 * check/commit entry points.
 */
2379 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2380 .fb_create = amdgpu_display_user_framebuffer_create,
2381 .get_format_info = amd_get_format_info,
2382 .output_poll_changed = drm_fb_helper_output_poll_changed,
2383 .atomic_check = amdgpu_dm_atomic_check,
2384 .atomic_commit = drm_atomic_helper_commit,
/* Route the atomic commit tail to DM's implementation so DC programs HW. */
2387 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2388 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
/*
 * Refresh dm->backlight_caps from an eDP sink's extended DPCD backlight
 * capabilities: decide AUX vs PWM brightness control (overridable via the
 * amdgpu_backlight module parameter) and derive the aux min/max input
 * signal (luminance) from the HDR metadata max_cll/min_cll using the
 * CTA-861-G formula explained inline below.  Does nothing for non-eDP links.
 */
2391 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2393 u32 max_cll, min_cll, max, min, q, r;
2394 struct amdgpu_dm_backlight_caps *caps;
2395 struct amdgpu_display_manager *dm;
2396 struct drm_connector *conn_base;
2397 struct amdgpu_device *adev;
2398 struct dc_link *link = NULL;
/* Lookup table of round(50 * 2^(r/32)) for r in 0..31 — see the derivation
 * comment further down. */
2399 static const u8 pre_computed_values[] = {
2400 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2401 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2403 if (!aconnector || !aconnector->dc_link)
/* Only eDP panels carry the DPCD extended backlight caps used here. */
2406 link = aconnector->dc_link;
2407 if (link->connector_signal != SIGNAL_TYPE_EDP)
2410 conn_base = &aconnector->base;
2411 adev = drm_to_adev(conn_base->dev);
2413 caps = &dm->backlight_caps;
2414 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2415 caps->aux_support = false;
2416 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2417 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
/* AUX backlight control if the sink advertises OLED or SDR/HDR AUX caps. */
2419 if (caps->ext_caps->bits.oled == 1 ||
2420 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2421 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2422 caps->aux_support = true;
/* Module parameter override: 0 forces PWM, 1 forces AUX, auto otherwise. */
2424 if (amdgpu_backlight == 0)
2425 caps->aux_support = false;
2426 else if (amdgpu_backlight == 1)
2427 caps->aux_support = true;
2429 /* From the specification (CTA-861-G), for calculating the maximum
2430 * luminance we need to use:
2431 * Luminance = 50*2**(CV/32)
2432 * Where CV is a one-byte value.
2433 * For calculating this expression we may need float point precision;
2434 * to avoid this complexity level, we take advantage that CV is divided
2435 * by a constant. From the Euclids division algorithm, we know that CV
2436 * can be written as: CV = 32*q + r. Next, we replace CV in the
2437 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2438 * need to pre-compute the value of r/32. For pre-computing the values
2439 * We just used the following Ruby line:
2440 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2441 * The results of the above expressions can be verified at
2442 * pre_computed_values.
/* NOTE(review): the q/r split of max_cll is computed on lines not visible
 * in this extract; the lookup below consumes that q and r. */
2446 max = (1 << q) * pre_computed_values[r];
2448 // min luminance: maxLum * (CV/255)^2 / 100
2449 q = DIV_ROUND_CLOSEST(min_cll, 255);
2450 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2452 caps->aux_max_input_signal = max;
2453 caps->aux_min_input_signal = min;
/*
 * Synchronize the connector's dc_sink, EDID property, freesync caps and CEC
 * state with the result of the most recent link detection.
 *
 * Four cases are handled, all under dev->mode_config.mutex where DRM state
 * is touched:
 *  - forced (DRM_FORCE_*) connectors with an emulated sink: keep or install
 *    the emulated sink (headless S3 resume path);
 *  - MST sink detected: bail out, the DRM MST framework owns those;
 *  - sink unchanged (DP short pulse): log and drop the extra reference;
 *  - new sink / sink removed: swap dc_sink refcounts, push or clear the
 *    EDID property, update CEC and freesync, and on removal downgrade HDCP
 *    content protection from ENABLED to DESIRED so it re-arms on hotplug.
 *
 * Refcount discipline: the local 'sink' reference taken near the top is
 * released on every exit path; aconnector->dc_sink always holds its own
 * retained reference.
 */
2456 void amdgpu_dm_update_connector_after_detect(
2457 struct amdgpu_dm_connector *aconnector)
2459 struct drm_connector *connector = &aconnector->base;
2460 struct drm_device *dev = connector->dev;
2461 struct dc_sink *sink;
2463 /* MST handled by drm_mst framework */
2464 if (aconnector->mst_mgr.mst_state == true)
2467 sink = aconnector->dc_link->local_sink;
2469 dc_sink_retain(sink);
2472 * Edid mgmt connector gets first update only in mode_valid hook and then
2473 * the connector sink is set to either fake or physical sink depends on link status.
2474 * Skip if already done during boot.
2476 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2477 && aconnector->dc_em_sink) {
2480 * For S3 resume with headless use eml_sink to fake stream
2481 * because on resume connector->sink is set to NULL
2483 mutex_lock(&dev->mode_config.mutex);
2486 if (aconnector->dc_sink) {
2487 amdgpu_dm_update_freesync_caps(connector, NULL);
2489 * retain and release below are used to
2490 * bump up refcount for sink because the link doesn't point
2491 * to it anymore after disconnect, so on next crtc to connector
2492 * reshuffle by UMD we will get into unwanted dc_sink release
2494 dc_sink_release(aconnector->dc_sink);
2496 aconnector->dc_sink = sink;
2497 dc_sink_retain(aconnector->dc_sink);
2498 amdgpu_dm_update_freesync_caps(connector,
2501 amdgpu_dm_update_freesync_caps(connector, NULL);
2502 if (!aconnector->dc_sink) {
2503 aconnector->dc_sink = aconnector->dc_em_sink;
2504 dc_sink_retain(aconnector->dc_sink);
2508 mutex_unlock(&dev->mode_config.mutex);
2511 dc_sink_release(sink);
2516 * TODO: temporary guard to look for proper fix
2517 * if this sink is MST sink, we should not do anything
2519 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2520 dc_sink_release(sink);
/* Same sink as before: a DP short pulse, nothing to re-plumb. */
2524 if (aconnector->dc_sink == sink) {
2526 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2529 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2530 aconnector->connector_id);
2532 dc_sink_release(sink);
2536 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2537 aconnector->connector_id, aconnector->dc_sink, sink);
2539 mutex_lock(&dev->mode_config.mutex);
2542 * 1. Update status of the drm connector
2543 * 2. Send an event and let userspace tell us what to do
2547 * TODO: check if we still need the S3 mode update workaround.
2548 * If yes, put it here.
2550 if (aconnector->dc_sink) {
2551 amdgpu_dm_update_freesync_caps(connector, NULL)
2552 dc_sink_release(aconnector->dc_sink);
2555 aconnector->dc_sink = sink;
2556 dc_sink_retain(aconnector->dc_sink);
2557 if (sink->dc_edid.length == 0) {
2558 aconnector->edid = NULL;
2559 if (aconnector->dc_link->aux_mode) {
2560 drm_dp_cec_unset_edid(
2561 &aconnector->dm_dp_aux.aux);
2565 (struct edid *)sink->dc_edid.raw_edid;
2567 drm_connector_update_edid_property(connector,
2569 if (aconnector->dc_link->aux_mode)
2570 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2574 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2575 update_connector_ext_caps(aconnector);
/* Disconnect: tear down CEC/freesync/EDID and drop our sink reference. */
2577 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2578 amdgpu_dm_update_freesync_caps(connector, NULL);
2579 drm_connector_update_edid_property(connector, NULL);
2580 aconnector->num_modes = 0;
2581 dc_sink_release(aconnector->dc_sink);
2582 aconnector->dc_sink = NULL;
2583 aconnector->edid = NULL;
2584 #ifdef CONFIG_DRM_AMD_DC_HDCP
2585 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2586 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2587 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2591 mutex_unlock(&dev->mode_config.mutex);
2593 update_subconnector_property(aconnector);
2596 dc_sink_release(sink);
/*
 * HPD (long-pulse) interrupt worker for one connector.
 *
 * Under aconnector->hpd_lock: resets HDCP for the link (so it can
 * renegotiate after the plug event), clears any fake-enable state, then
 * either emulates a link detection (forced connector with no physical
 * sink) or performs a real dc_link_detect().  On success, connector state
 * is refreshed and a DRM hotplug event is sent to userspace.
 */
2599 static void handle_hpd_irq(void *param)
2601 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2602 struct drm_connector *connector = &aconnector->base;
2603 struct drm_device *dev = connector->dev;
2604 enum dc_connection_type new_connection_type = dc_connection_none;
2605 struct amdgpu_device *adev = drm_to_adev(dev);
2606 #ifdef CONFIG_DRM_AMD_DC_HDCP
2607 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
/* Global kill-switch for HPD processing (e.g. during GPU reset). */
2610 if (adev->dm.disable_hpd_irq)
2614 * In case of failure or MST no need to update connector status or notify the OS
2615 * since (for MST case) MST does this in its own context.
2617 mutex_lock(&aconnector->hpd_lock);
2619 #ifdef CONFIG_DRM_AMD_DC_HDCP
2620 if (adev->dm.hdcp_workqueue) {
2621 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2622 dm_con_state->update_hdcp = true;
2625 if (aconnector->fake_enable)
2626 aconnector->fake_enable = false;
2628 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2629 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connector with nothing attached: emulate the detection result. */
2631 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2632 emulated_link_detect(aconnector->dc_link);
2635 drm_modeset_lock_all(dev);
2636 dm_restore_drm_connector_state(dev, connector);
2637 drm_modeset_unlock_all(dev);
2639 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2640 drm_kms_helper_hotplug_event(dev);
2642 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
/* Unplug with no remaining link: turn the display pipe off. */
2643 if (new_connection_type == dc_connection_none &&
2644 aconnector->dc_link->type == dc_connection_none)
2645 dm_set_dpms_off(aconnector->dc_link);
2647 amdgpu_dm_update_connector_after_detect(aconnector);
2649 drm_modeset_lock_all(dev);
2650 dm_restore_drm_connector_state(dev, connector);
2651 drm_modeset_unlock_all(dev);
2653 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2654 drm_kms_helper_hotplug_event(dev);
2656 mutex_unlock(&aconnector->hpd_lock);
/*
 * Service MST downstream (short-pulse) IRQs for an MST root connector.
 *
 * Reads the sink-count/ESI DPCD block (legacy 0x200 range for DPCD < 1.2,
 * 0x2002 ESI range otherwise), hands it to the MST topology manager, ACKs
 * handled IRQs back to the sink (up to 3 write retries), and loops while
 * the sink keeps raising new IRQs — bounded by max_process_count to avoid
 * spinning on a misbehaving sink.
 */
2660 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2662 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2664 bool new_irq_handled = false;
2666 int dpcd_bytes_to_read;
2668 const int max_process_count = 30;
2669 int process_count = 0;
2671 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2673 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2674 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2675 /* DPCD 0x200 - 0x201 for downstream IRQ */
2676 dpcd_addr = DP_SINK_COUNT;
2678 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2679 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2680 dpcd_addr = DP_SINK_COUNT_ESI;
2683 dret = drm_dp_dpcd_read(
2684 &aconnector->dm_dp_aux.aux,
2687 dpcd_bytes_to_read);
2689 while (dret == dpcd_bytes_to_read &&
2690 process_count < max_process_count) {
2696 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2697 /* handle HPD short pulse irq */
2698 if (aconnector->mst_mgr.mst_state)
2700 &aconnector->mst_mgr,
2704 if (new_irq_handled) {
2705 /* ACK at DPCD to notify down stream */
2706 const int ack_dpcd_bytes_to_write =
2707 dpcd_bytes_to_read - 1;
2709 for (retry = 0; retry < 3; retry++) {
2712 wret = drm_dp_dpcd_write(
2713 &aconnector->dm_dp_aux.aux,
2716 ack_dpcd_bytes_to_write);
2717 if (wret == ack_dpcd_bytes_to_write)
2721 /* check if there is new irq to be handled */
2722 dret = drm_dp_dpcd_read(
2723 &aconnector->dm_dp_aux.aux,
2726 dpcd_bytes_to_read);
2728 new_irq_handled = false;
2734 if (process_count == max_process_count)
2735 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
/*
 * HPD-RX (short-pulse) interrupt worker for one connector.
 *
 * Under hpd_lock: reads the HPD RX IRQ data, drains MST up-request /
 * down-reply messages for MST roots, then calls the DC short-pulse
 * handler (taking dc_lock only around link-loss / automated-test cases —
 * see the inline TODO).  If DC reports a downstream-port status change on
 * a non-MST connector, the link is re-detected (or emulated for forced
 * connectors), connector state is refreshed and a hotplug event is sent.
 * CP_IRQ is forwarded to the HDCP workqueue and CEC IRQs to the DP CEC
 * helper for SST links.
 */
2738 static void handle_hpd_rx_irq(void *param)
2740 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2741 struct drm_connector *connector = &aconnector->base;
2742 struct drm_device *dev = connector->dev;
2743 struct dc_link *dc_link = aconnector->dc_link;
2744 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2745 bool result = false;
2746 enum dc_connection_type new_connection_type = dc_connection_none;
2747 struct amdgpu_device *adev = drm_to_adev(dev);
2748 union hpd_irq_data hpd_irq_data;
2751 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2753 if (adev->dm.disable_hpd_irq)
2758 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2759 * conflict, after implement i2c helper, this mutex should be
2762 mutex_lock(&aconnector->hpd_lock);
2764 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2766 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2767 (dc_link->type == dc_connection_mst_branch)) {
2768 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2770 dm_handle_hpd_rx_irq(aconnector);
2772 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2774 dm_handle_hpd_rx_irq(aconnector);
2780 * TODO: We need the lock to avoid touching DC state while it's being
2781 * modified during automated compliance testing, or when link loss
2782 * happens. While this should be split into subhandlers and proper
2783 * interfaces to avoid having to conditionally lock like this in the
2784 * outer layer, we need this workaround temporarily to allow MST
2785 * lightup in some scenarios to avoid timeout.
2787 if (!amdgpu_in_reset(adev) &&
2788 (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2789 hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2790 mutex_lock(&adev->dm.dc_lock);
2794 #ifdef CONFIG_DRM_AMD_DC_HDCP
2795 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2797 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2799 if (!amdgpu_in_reset(adev) && lock_flag)
2800 mutex_unlock(&adev->dm.dc_lock);
2803 if (result && !is_mst_root_connector) {
2804 /* Downstream Port status changed. */
2805 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2806 DRM_ERROR("KMS: Failed to detect connector\n");
2808 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2809 emulated_link_detect(dc_link);
2811 if (aconnector->fake_enable)
2812 aconnector->fake_enable = false;
2814 amdgpu_dm_update_connector_after_detect(aconnector);
2817 drm_modeset_lock_all(dev);
2818 dm_restore_drm_connector_state(dev, connector);
2819 drm_modeset_unlock_all(dev);
2821 drm_kms_helper_hotplug_event(dev);
2822 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2824 if (aconnector->fake_enable)
2825 aconnector->fake_enable = false;
2827 amdgpu_dm_update_connector_after_detect(aconnector);
2830 drm_modeset_lock_all(dev);
2831 dm_restore_drm_connector_state(dev, connector);
2832 drm_modeset_unlock_all(dev);
2834 drm_kms_helper_hotplug_event(dev);
2837 #ifdef CONFIG_DRM_AMD_DC_HDCP
2838 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2839 if (adev->dm.hdcp_workqueue)
2840 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
/* CEC IRQs only apply to SST links; MST branches are skipped. */
2844 if (dc_link->type != dc_connection_mst_branch)
2845 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2847 mutex_unlock(&aconnector->hpd_lock);
/*
 * Walk every connector and register low-context IRQ handlers for its HPD
 * (long pulse) and HPD-RX (DP short pulse) interrupt sources, where the
 * DC link exposes a valid source.  Handlers receive the aconnector as
 * their context argument.
 */
2850 static void register_hpd_handlers(struct amdgpu_device *adev)
2852 struct drm_device *dev = adev_to_drm(adev);
2853 struct drm_connector *connector;
2854 struct amdgpu_dm_connector *aconnector;
2855 const struct dc_link *dc_link;
2856 struct dc_interrupt_params int_params = {0};
2858 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2859 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2861 list_for_each_entry(connector,
2862 &dev->mode_config.connector_list, head) {
2864 aconnector = to_amdgpu_dm_connector(connector);
2865 dc_link = aconnector->dc_link;
2867 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2868 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2869 int_params.irq_source = dc_link->irq_source_hpd;
2871 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2873 (void *) aconnector);
2876 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2878 /* Also register for DP short pulse (hpd_rx). */
2879 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2880 int_params.irq_source = dc_link->irq_source_hpd_rx;
2882 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884 (void *) aconnector);
2889 #if defined(CONFIG_DRM_AMD_DC_SI)
2890 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * DCE 6.x (SI) variant: hooks up per-CRTC VBLANK and GRPH_PFLIP interrupt
 * sources plus the HPD interrupt id, all on the legacy IRQ client.
 * Returns 0 on success or the amdgpu_irq_add_id() error.
 */
2891 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2893 struct dc *dc = adev->dm.dc;
2894 struct common_irq_params *c_irq_params;
2895 struct dc_interrupt_params int_params = {0};
2898 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2900 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2901 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2904 * Actions of amdgpu_irq_add_id():
2905 * 1. Register a set() function with base driver.
2906 * Base driver will call set() function to enable/disable an
2907 * interrupt in DC hardware.
2908 * 2. Register amdgpu_dm_irq_handler().
2909 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2910 * coming from DC hardware.
2911 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2912 * for acknowledging and handling. */
2914 /* Use VBLANK interrupt */
2915 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2916 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2918 DRM_ERROR("Failed to add crtc irq id!\n");
2922 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2923 int_params.irq_source =
2924 dc_interrupt_to_irq_source(dc, i+1 , 0);
2926 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2928 c_irq_params->adev = adev;
2929 c_irq_params->irq_src = int_params.irq_source;
2931 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2932 dm_crtc_high_irq, c_irq_params);
2935 /* Use GRPH_PFLIP interrupt */
2936 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2937 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2938 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2940 DRM_ERROR("Failed to add page flip irq id!\n");
2944 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2945 int_params.irq_source =
2946 dc_interrupt_to_irq_source(dc, i, 0);
2948 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2950 c_irq_params->adev = adev;
2951 c_irq_params->irq_src = int_params.irq_source;
2953 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2954 dm_pflip_high_irq, c_irq_params);
2959 r = amdgpu_irq_add_id(adev, client_id,
2960 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2962 DRM_ERROR("Failed to add hpd irq id!\n");
2966 register_hpd_handlers(adev);
2972 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * DCE 11.x variant: per-CRTC VBLANK, VUPDATE and GRPH_PFLIP interrupt
 * sources plus HPD.  Uses the SOC15 DCE IRQ client on VEGA10 and newer,
 * the legacy client otherwise.  Returns 0 or the amdgpu_irq_add_id() error.
 */
2973 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2975 struct dc *dc = adev->dm.dc;
2976 struct common_irq_params *c_irq_params;
2977 struct dc_interrupt_params int_params = {0};
2980 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2982 if (adev->asic_type >= CHIP_VEGA10)
2983 client_id = SOC15_IH_CLIENTID_DCE;
2985 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2986 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2989 * Actions of amdgpu_irq_add_id():
2990 * 1. Register a set() function with base driver.
2991 * Base driver will call set() function to enable/disable an
2992 * interrupt in DC hardware.
2993 * 2. Register amdgpu_dm_irq_handler().
2994 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2995 * coming from DC hardware.
2996 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2997 * for acknowledging and handling. */
2999 /* Use VBLANK interrupt */
3000 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3001 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3003 DRM_ERROR("Failed to add crtc irq id!\n");
3007 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3008 int_params.irq_source =
3009 dc_interrupt_to_irq_source(dc, i, 0);
3011 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3013 c_irq_params->adev = adev;
3014 c_irq_params->irq_src = int_params.irq_source;
3016 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3017 dm_crtc_high_irq, c_irq_params);
3020 /* Use VUPDATE interrupt */
3021 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3022 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3024 DRM_ERROR("Failed to add vupdate irq id!\n");
3028 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3029 int_params.irq_source =
3030 dc_interrupt_to_irq_source(dc, i, 0);
3032 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3034 c_irq_params->adev = adev;
3035 c_irq_params->irq_src = int_params.irq_source;
3037 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3038 dm_vupdate_high_irq, c_irq_params);
3041 /* Use GRPH_PFLIP interrupt */
3042 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3043 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3044 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3046 DRM_ERROR("Failed to add page flip irq id!\n");
3050 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3051 int_params.irq_source =
3052 dc_interrupt_to_irq_source(dc, i, 0);
3054 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3056 c_irq_params->adev = adev;
3057 c_irq_params->irq_src = int_params.irq_source;
3059 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3060 dm_pflip_high_irq, c_irq_params);
3065 r = amdgpu_irq_add_id(adev, client_id,
3066 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3068 DRM_ERROR("Failed to add hpd irq id!\n");
3072 register_hpd_handlers(adev);
3077 #if defined(CONFIG_DRM_AMD_DC_DCN)
3078 /* Register IRQ sources and initialize IRQ callbacks */
/*
 * DCN variant: per-CRTC VSTARTUP (used as the vblank source on DCN),
 * optional OTG vertical-line interrupts for secure display, VUPDATE_NO_LOCK
 * (DCE-VUPDATE equivalent — see inline comment), GRPH_PFLIP and HPD.
 * All on the SOC15 DCE IRQ client.  Returns 0 or the registration error.
 */
3079 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3081 struct dc *dc = adev->dm.dc;
3082 struct common_irq_params *c_irq_params;
3083 struct dc_interrupt_params int_params = {0};
3086 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3087 static const unsigned int vrtl_int_srcid[] = {
3088 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3089 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3090 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3091 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3092 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3093 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3097 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3098 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3101 * Actions of amdgpu_irq_add_id():
3102 * 1. Register a set() function with base driver.
3103 * Base driver will call set() function to enable/disable an
3104 * interrupt in DC hardware.
3105 * 2. Register amdgpu_dm_irq_handler().
3106 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3107 * coming from DC hardware.
3108 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3109 * for acknowledging and handling.
3112 /* Use VSTARTUP interrupt */
3113 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3114 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3116 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3119 DRM_ERROR("Failed to add crtc irq id!\n");
3123 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3124 int_params.irq_source =
3125 dc_interrupt_to_irq_source(dc, i, 0);
3127 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3129 c_irq_params->adev = adev;
3130 c_irq_params->irq_src = int_params.irq_source;
3132 amdgpu_dm_irq_register_interrupt(
3133 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3136 /* Use otg vertical line interrupt */
3137 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3138 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3139 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3140 vrtl_int_srcid[i], &adev->vline0_irq);
3143 DRM_ERROR("Failed to add vline0 irq id!\n");
3147 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3148 int_params.irq_source =
3149 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3151 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3152 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3156 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3157 - DC_IRQ_SOURCE_DC1_VLINE0];
3159 c_irq_params->adev = adev;
3160 c_irq_params->irq_src = int_params.irq_source;
3162 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3163 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3167 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3168 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3169 * to trigger at end of each vblank, regardless of state of the lock,
3170 * matching DCE behaviour.
3172 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3173 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3175 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3178 DRM_ERROR("Failed to add vupdate irq id!\n");
3182 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3183 int_params.irq_source =
3184 dc_interrupt_to_irq_source(dc, i, 0);
3186 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3188 c_irq_params->adev = adev;
3189 c_irq_params->irq_src = int_params.irq_source;
3191 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3192 dm_vupdate_high_irq, c_irq_params);
3195 /* Use GRPH_PFLIP interrupt */
3196 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3197 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3199 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3201 DRM_ERROR("Failed to add page flip irq id!\n");
3205 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3206 int_params.irq_source =
3207 dc_interrupt_to_irq_source(dc, i, 0);
3209 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3211 c_irq_params->adev = adev;
3212 c_irq_params->irq_src = int_params.irq_source;
3214 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3215 dm_pflip_high_irq, c_irq_params);
3220 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3223 DRM_ERROR("Failed to add hpd irq id!\n");
3227 register_hpd_handlers(adev);
3231 /* Register Outbox IRQ sources and initialize IRQ callbacks */
/*
 * Register the DMUB outbox (low-priority ready) interrupt and, when a DMUB
 * service exists, route it to dm_dmub_outbox1_low_irq in low IRQ context.
 * Returns 0 or the amdgpu_irq_add_id() error.
 */
3232 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3234 struct dc *dc = adev->dm.dc;
3235 struct common_irq_params *c_irq_params;
3236 struct dc_interrupt_params int_params = {0};
3239 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3240 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3242 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3243 &adev->dmub_outbox_irq);
3245 DRM_ERROR("Failed to add outbox irq id!\n");
3249 if (dc->ctx->dmub_srv) {
3250 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3251 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3252 int_params.irq_source =
3253 dc_interrupt_to_irq_source(dc, i, 0);
3255 c_irq_params = &adev->dm.dmub_outbox_params[0];
3257 c_irq_params->adev = adev;
3258 c_irq_params->irq_src = int_params.irq_source;
3260 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3261 dm_dmub_outbox1_low_irq, c_irq_params);
3269 * Acquires the lock for the atomic state object and returns
3270 * the new atomic state.
3272 * This should only be called during atomic check.
/*
 * Fetch (and lock) DM's private atomic object state from @state, storing
 * the result in *@dm_state.  Returns 0 on success or the
 * drm_atomic_get_private_obj_state() error (e.g. -EDEADLK for backoff).
 */
3274 static int dm_atomic_get_state(struct drm_atomic_state *state,
3275 struct dm_atomic_state **dm_state)
3277 struct drm_device *dev = state->dev;
3278 struct amdgpu_device *adev = drm_to_adev(dev);
3279 struct amdgpu_display_manager *dm = &adev->dm;
3280 struct drm_private_state *priv_state;
3285 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3286 if (IS_ERR(priv_state))
3287 return PTR_ERR(priv_state);
3289 *dm_state = to_dm_atomic_state(priv_state);
/*
 * Look up DM's private object in @state's new-state array without taking
 * any lock.  Returns the dm_atomic_state if present (NULL otherwise —
 * the fall-through return is outside this extract).
 */
3294 static struct dm_atomic_state *
3295 dm_atomic_get_new_state(struct drm_atomic_state *state)
3297 struct drm_device *dev = state->dev;
3298 struct amdgpu_device *adev = drm_to_adev(dev);
3299 struct amdgpu_display_manager *dm = &adev->dm;
3300 struct drm_private_obj *obj;
3301 struct drm_private_state *new_obj_state;
3304 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3305 if (obj->funcs == dm->atomic_obj.funcs)
3306 return to_dm_atomic_state(new_obj_state);
/*
 * .atomic_duplicate_state hook for DM's private object: allocate a new
 * dm_atomic_state and deep-copy the old DC context into it via
 * dc_copy_state().  Error paths (allocation/copy failure) are partially
 * outside this extract.
 */
3312 static struct drm_private_state *
3313 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3315 struct dm_atomic_state *old_state, *new_state;
3317 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3321 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3323 old_state = to_dm_atomic_state(obj->state);
3325 if (old_state && old_state->context)
3326 new_state->context = dc_copy_state(old_state->context);
3328 if (!new_state->context) {
3333 return &new_state->base;
/*
 * .atomic_destroy_state hook: release the DC context held by the private
 * state before the state itself is freed.
 */
3336 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3337 struct drm_private_state *state)
3339 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3341 if (dm_state && dm_state->context)
3342 dc_release_state(dm_state->context);
/* Private-object vtable binding the duplicate/destroy hooks above. */
3347 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3348 .atomic_duplicate_state = dm_atomic_duplicate_state,
3349 .atomic_destroy_state = dm_atomic_destroy_state,
/*
 * One-time DRM mode-config setup for the DM driver: install the mode-config
 * funcs/helpers and limits, create the initial DC state (seeded from the
 * current hardware resource state), register it as a DRM private object,
 * then create display properties and the audio component.
 * Returns 0 on success; on failure the DC state is released (error-path
 * returns are partially outside this extract).
 */
3352 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3354 struct dm_atomic_state *state;
3357 adev->mode_info.mode_config_initialized = true;
3359 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3360 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3362 adev_to_drm(adev)->mode_config.max_width = 16384;
3363 adev_to_drm(adev)->mode_config.max_height = 16384;
3365 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3366 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3367 /* indicates support for immediate flip */
3368 adev_to_drm(adev)->mode_config.async_page_flip = true;
3370 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3372 state = kzalloc(sizeof(*state), GFP_KERNEL);
3376 state->context = dc_create_state(adev->dm.dc);
3377 if (!state->context) {
3382 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3384 drm_atomic_private_obj_init(adev_to_drm(adev),
3385 &adev->dm.atomic_obj,
3387 &dm_atomic_state_funcs);
3389 r = amdgpu_display_modeset_create_props(adev);
3391 dc_release_state(state->context);
3396 r = amdgpu_dm_audio_init(adev);
3398 dc_release_state(state->context);
/* Fallback PWM backlight range (8-bit input signal) used when ACPI does not
 * provide valid caps, and the default AUX brightness transition time. */
3406 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3407 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3408 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3410 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3411 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/*
 * Populate dm->backlight_caps once: query ACPI (when available) for the
 * min/max input signal, falling back to the AMDGPU_DM_DEFAULT_* range.
 * Already-valid caps and AUX-controlled backlights short-circuit (early
 * returns are outside this extract).
 */
3413 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3415 #if defined(CONFIG_ACPI)
3416 struct amdgpu_dm_backlight_caps caps;
3418 memset(&caps, 0, sizeof(caps));
3420 if (dm->backlight_caps.caps_valid)
3423 amdgpu_acpi_get_backlight_caps(&caps);
3424 if (caps.caps_valid) {
3425 dm->backlight_caps.caps_valid = true;
3426 if (caps.aux_support)
3428 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3429 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3431 dm->backlight_caps.min_input_signal =
3432 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3433 dm->backlight_caps.max_input_signal =
3434 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3437 if (dm->backlight_caps.aux_support)
/* Non-ACPI build: always use the compile-time default range. */
3440 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3441 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
/*
 * Compute the backlight control range for @caps into *@min/*@max:
 * AUX control is in millinits (caps are in nits, hence *1000); PWM control
 * scales the 8-bit firmware limits to 16 bits (*0x101).
 * Returns non-zero on success (the NULL-caps guard and return statements
 * fall on lines outside this extract).
 */
3445 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3446 unsigned *min, unsigned *max)
3451 if (caps->aux_support) {
3452 // Firmware limits are in nits, DC API wants millinits.
3453 *max = 1000 * caps->aux_max_input_signal;
3454 *min = 1000 * caps->aux_min_input_signal;
3456 // Firmware limits are 8-bit, PWM control is 16-bit.
3457 *max = 0x101 * caps->max_input_signal;
3458 *min = 0x101 * caps->min_input_signal;
/*
 * Map a userspace brightness value (0..AMDGPU_MAX_BL_LEVEL) linearly onto
 * the hardware range [min, max] reported by get_brightness_range().
 */
3463 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3464 uint32_t brightness)
3468 if (!get_brightness_range(caps, &min, &max))
3471 // Rescale 0..255 to min..max
3472 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3473 AMDGPU_MAX_BL_LEVEL);
/*
 * Inverse of convert_brightness_from_user(): map a hardware brightness in
 * [min, max] back to the userspace 0..AMDGPU_MAX_BL_LEVEL scale; values
 * below min clamp to 0 (that return is on a line outside this extract).
 */
3476 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3477 uint32_t brightness)
3481 if (!get_brightness_range(caps, &min, &max))
3484 if (brightness < min)
3486 // Rescale min..max to 0..255
3487 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
/*
 * Apply @user_brightness to every eDP link managed by @dm, caching the
 * requested level in dm->brightness[] first. Uses the AUX (nits) path when
 * the caps advertise aux_support, otherwise the PWM dc_link path.
 * A per-link failure only emits a debug message; remaining links are still
 * programmed.
 */
3491 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3492 u32 user_brightness)
3494 struct amdgpu_dm_backlight_caps caps;
3495 struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3496 u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3500 amdgpu_dm_update_backlight_caps(dm);
3501 caps = dm->backlight_caps;
/* Convert the user value once per link and snapshot the link pointers. */
3503 for (i = 0; i < dm->num_of_edps; i++) {
3504 dm->brightness[i] = user_brightness;
3505 brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3506 link[i] = (struct dc_link *)dm->backlight_link[i];
3509 /* Change brightness based on AUX property */
3510 if (caps.aux_support) {
3511 for (i = 0; i < dm->num_of_edps; i++) {
3512 rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3513 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3515 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
/* PWM path (else branch; brace lines missing in this extract). */
3520 for (i = 0; i < dm->num_of_edps; i++) {
3521 rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3523 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
/*
 * backlight_ops.update_status callback: forward the sysfs-requested
 * brightness to the common set_level helper.
 */
3532 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3534 struct amdgpu_display_manager *dm = bl_get_data(bd);
3536 amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
/*
 * Read back the current brightness (userspace 0..255 scale) from the first
 * eDP link. On any hardware read failure the last cached value
 * dm->brightness[0] is returned instead.
 */
3541 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3543 struct amdgpu_dm_backlight_caps caps;
3545 amdgpu_dm_update_backlight_caps(dm);
3546 caps = dm->backlight_caps;
3548 if (caps.aux_support) {
3549 struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
/* AUX readback reports average and peak nits; only avg is reported. */
3553 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3555 return dm->brightness[0];
3556 return convert_brightness_to_user(&caps, avg);
/* PWM path (else branch; brace lines missing in this extract). */
3558 int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3560 if (ret == DC_ERROR_UNEXPECTED)
3561 return dm->brightness[0];
3562 return convert_brightness_to_user(&caps, ret);
/* backlight_ops.get_brightness callback: thin wrapper over get_level. */
3566 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3568 struct amdgpu_display_manager *dm = bl_get_data(bd);
3570 return amdgpu_dm_backlight_get_level(dm);
/* Operations handed to backlight_device_register() below. */
3573 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3574 .options = BL_CORE_SUSPENDRESUME,
3575 .get_brightness = amdgpu_dm_backlight_get_brightness,
3576 .update_status = amdgpu_dm_backlight_update_status,
/*
 * Create the "amdgpu_blN" backlight class device for this DM instance and
 * seed all per-eDP cached brightness values to full. Registration failure
 * is logged but deliberately non-fatal (see register_backlight_device()).
 */
3580 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3583 struct backlight_properties props = { 0 };
3586 amdgpu_dm_update_backlight_caps(dm);
3587 for (i = 0; i < dm->num_of_edps; i++)
3588 dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3590 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3591 props.brightness = AMDGPU_MAX_BL_LEVEL;
3592 props.type = BACKLIGHT_RAW;
/* Name is derived from the DRM primary minor index, e.g. amdgpu_bl0. */
3594 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3595 adev_to_drm(dm->adev)->primary->index);
3597 dm->backlight_dev = backlight_device_register(bl_name,
3598 adev_to_drm(dm->adev)->dev,
3600 &amdgpu_dm_backlight_ops,
3603 if (IS_ERR(dm->backlight_dev))
3604 DRM_ERROR("DM: Backlight registration failed!\n");
3606 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
/*
 * Allocate and initialize one DRM plane of @plane_type with capabilities
 * @plane_cap, storing it in mode_info->planes[plane_id] (when mode_info is
 * non-NULL). Returns 0 on success, negative error code otherwise (error
 * return lines are missing from this extract).
 */
3611 static int initialize_plane(struct amdgpu_display_manager *dm,
3612 struct amdgpu_mode_info *mode_info, int plane_id,
3613 enum drm_plane_type plane_type,
3614 const struct dc_plane_cap *plane_cap)
3616 struct drm_plane *plane;
3617 unsigned long possible_crtcs;
3620 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3622 DRM_ERROR("KMS: Failed to allocate plane\n");
3625 plane->type = plane_type;
3628 * HACK: IGT tests expect that the primary plane for a CRTC
3629 * can only have one possible CRTC. Only expose support for
3630 * any CRTC if they're not going to be used as a primary plane
3631 * for a CRTC - like overlay or underlay planes.
/* Primary planes: one fixed CRTC; overlay/underlay: any CRTC (0xff). */
3633 possible_crtcs = 1 << plane_id;
3634 if (plane_id >= dm->dc->caps.max_streams)
3635 possible_crtcs = 0xff;
3637 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3640 DRM_ERROR("KMS: Failed to initialize plane\n");
3646 mode_info->planes[plane_id] = plane;
/*
 * Hook @link up as a backlight-controlled eDP/LVDS output: lazily register
 * the backlight class device on first use and record the link in
 * dm->backlight_link[]. No-op unless the backlight class is configured.
 */
3652 static void register_backlight_device(struct amdgpu_display_manager *dm,
3653 struct dc_link *link)
3655 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3656 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3658 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3659 link->type != dc_connection_none) {
3661 * Event if registration failed, we should continue with
3662 * DM initialization because not having a backlight control
3663 * is better then a black screen.
3665 if (!dm->backlight_dev)
3666 amdgpu_dm_register_backlight_device(dm);
3668 if (dm->backlight_dev) {
3669 dm->backlight_link[dm->num_of_edps] = link;
3678 * In this architecture, the association
3679 * connector -> encoder -> crtc
3680 * is not really required. The crtc and connector will hold the
3681 * display_index as an abstraction to use with DAL component
3683 * Returns 0 on success
/*
 * Top-level KMS bring-up: mode config, planes, CRTCs, connectors/encoders,
 * link detection and IRQ handler registration for the detected ASIC.
 * NOTE(review): error-path lines (fail: labels, returns) are missing from
 * this extract — verify against upstream amdgpu_dm.c.
 */
3685 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3687 struct amdgpu_display_manager *dm = &adev->dm;
3689 struct amdgpu_dm_connector *aconnector = NULL;
3690 struct amdgpu_encoder *aencoder = NULL;
3691 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3693 int32_t primary_planes;
3694 enum dc_connection_type new_connection_type = dc_connection_none;
3695 const struct dc_plane_cap *plane;
3697 dm->display_indexes_num = dm->dc->caps.max_streams;
3698 /* Update the actual used number of crtc */
3699 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3701 link_cnt = dm->dc->caps.max_links;
3702 if (amdgpu_dm_mode_config_init(dm->adev)) {
3703 DRM_ERROR("DM: Failed to initialize mode config\n");
3707 /* There is one primary plane per CRTC */
3708 primary_planes = dm->dc->caps.max_streams;
3709 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3712 * Initialize primary planes, implicit planes for legacy IOCTLS.
3713 * Order is reversed to match iteration order in atomic check.
3715 for (i = (primary_planes - 1); i >= 0; i--) {
3716 plane = &dm->dc->caps.planes[i];
3718 if (initialize_plane(dm, mode_info, i,
3719 DRM_PLANE_TYPE_PRIMARY, plane)) {
3720 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3726 * Initialize overlay planes, index starting after primary planes.
3727 * These planes have a higher DRM index than the primary planes since
3728 * they should be considered as having a higher z-order.
3729 * Order is reversed to match iteration order in atomic check.
3731 * Only support DCN for now, and only expose one so we don't encourage
3732 * userspace to use up all the pipes.
3734 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3735 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
/* Skip planes that can't serve as a blendable ARGB8888 overlay. */
3737 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3740 if (!plane->blends_with_above || !plane->blends_with_below)
3743 if (!plane->pixel_format_support.argb8888)
3746 if (initialize_plane(dm, NULL, primary_planes + i,
3747 DRM_PLANE_TYPE_OVERLAY, plane)) {
3748 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3752 /* Only create one overlay plane. */
3756 for (i = 0; i < dm->dc->caps.max_streams; i++)
3757 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3758 DRM_ERROR("KMS: Failed to initialize crtc\n");
3762 #if defined(CONFIG_DRM_AMD_DC_DCN)
3763 /* Use Outbox interrupt */
3764 switch (adev->asic_type) {
3765 case CHIP_SIENNA_CICHLID:
3766 case CHIP_NAVY_FLOUNDER:
3768 if (register_outbox_irq_handlers(dm->adev)) {
3769 DRM_ERROR("DM: Failed to initialize IRQ\n");
3774 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3778 /* loops over all connectors on the board */
3779 for (i = 0; i < link_cnt; i++) {
3780 struct dc_link *link = NULL;
3782 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3784 "KMS: Cannot support more than %d display indexes\n",
3785 AMDGPU_DM_MAX_DISPLAY_INDEX);
3789 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3793 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3797 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3798 DRM_ERROR("KMS: Failed to initialize encoder\n");
3802 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3803 DRM_ERROR("KMS: Failed to initialize connector\n");
3807 link = dc_get_link_at_index(dm->dc, i);
3809 if (!dc_link_detect_sink(link, &new_connection_type))
3810 DRM_ERROR("KMS: Failed to detect connector\n");
/* Forced connectors with no sink get an emulated link instead. */
3812 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3813 emulated_link_detect(link);
3814 amdgpu_dm_update_connector_after_detect(aconnector);
3816 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3817 amdgpu_dm_update_connector_after_detect(aconnector);
3818 register_backlight_device(dm, link);
3819 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3820 amdgpu_dm_set_psr_caps(link);
3826 /* Software is initialized. Now we can register interrupt handlers. */
/* NOTE(review): many case labels are missing from this extract. */
3827 switch (adev->asic_type) {
3828 #if defined(CONFIG_DRM_AMD_DC_SI)
3833 if (dce60_register_irq_handlers(dm->adev)) {
3834 DRM_ERROR("DM: Failed to initialize IRQ\n");
3848 case CHIP_POLARIS11:
3849 case CHIP_POLARIS10:
3850 case CHIP_POLARIS12:
3855 if (dce110_register_irq_handlers(dm->adev)) {
3856 DRM_ERROR("DM: Failed to initialize IRQ\n");
3860 #if defined(CONFIG_DRM_AMD_DC_DCN)
3866 case CHIP_SIENNA_CICHLID:
3867 case CHIP_NAVY_FLOUNDER:
3868 case CHIP_DIMGREY_CAVEFISH:
3869 case CHIP_BEIGE_GOBY:
3871 if (dcn10_register_irq_handlers(dm->adev)) {
3872 DRM_ERROR("DM: Failed to initialize IRQ\n");
3878 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
/* Tear down the atomic private object created during DRM device init. */
3890 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3892 drm_atomic_private_obj_fini(&dm->atomic_obj);
3896 /******************************************************************************
3897 * amdgpu_display_funcs functions
3898 *****************************************************************************/
3901 * dm_bandwidth_update - program display watermarks
3903 * @adev: amdgpu_device pointer
3905 * Calculate and program the display watermarks and line buffer allocation.
/* Intentionally a stub: DC handles bandwidth internally for now. */
3907 static void dm_bandwidth_update(struct amdgpu_device *adev)
3909 /* TODO: implement later */
/*
 * amdgpu display callbacks for the DC path. Entries set to NULL are either
 * never invoked with DC or are handled by DAL/VBIOS parsing instead.
 */
3912 static const struct amdgpu_display_funcs dm_display_funcs = {
3913 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3914 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3915 .backlight_set_level = NULL, /* never called for DC */
3916 .backlight_get_level = NULL, /* never called for DC */
3917 .hpd_sense = NULL,/* called unconditionally */
3918 .hpd_set_polarity = NULL, /* called unconditionally */
3919 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3920 .page_flip_get_scanoutpos =
3921 dm_crtc_get_scanoutpos,/* called unconditionally */
3922 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3923 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3926 #if defined(CONFIG_DEBUG_KERNEL_DC)
/*
 * Debug-only sysfs hook (s3_debug): parse an integer from @buf and fake an
 * S3 suspend/resume cycle, then kick a hotplug event so userspace re-probes.
 * NOTE(review): the suspend/resume calls themselves are missing from this
 * extract — verify against upstream.
 */
3928 static ssize_t s3_debug_store(struct device *device,
3929 struct device_attribute *attr,
3935 struct drm_device *drm_dev = dev_get_drvdata(device);
3936 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3938 ret = kstrtoint(buf, 0, &s3_state);
3943 drm_kms_helper_hotplug_event(adev_to_drm(adev));
/* Standard sysfs store contract: consume the whole buffer on success. */
3948 return ret == 0 ? count : 0;
3951 DEVICE_ATTR_WO(s3_debug);
/*
 * amd_ip_funcs.early_init: set per-ASIC CRTC/HPD/DIG counts, install the
 * IRQ funcs and the DC display function table, and (debug builds) create
 * the s3_debug sysfs file.
 * NOTE(review): most CHIP_* case labels are missing from this extract —
 * the groupings below cannot be attributed to specific ASICs from here.
 */
3955 static int dm_early_init(void *handle)
3957 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3959 switch (adev->asic_type) {
3960 #if defined(CONFIG_DRM_AMD_DC_SI)
3964 adev->mode_info.num_crtc = 6;
3965 adev->mode_info.num_hpd = 6;
3966 adev->mode_info.num_dig = 6;
3969 adev->mode_info.num_crtc = 2;
3970 adev->mode_info.num_hpd = 2;
3971 adev->mode_info.num_dig = 2;
3976 adev->mode_info.num_crtc = 6;
3977 adev->mode_info.num_hpd = 6;
3978 adev->mode_info.num_dig = 6;
3981 adev->mode_info.num_crtc = 4;
3982 adev->mode_info.num_hpd = 6;
3983 adev->mode_info.num_dig = 7;
3987 adev->mode_info.num_crtc = 2;
3988 adev->mode_info.num_hpd = 6;
3989 adev->mode_info.num_dig = 6;
3993 adev->mode_info.num_crtc = 6;
3994 adev->mode_info.num_hpd = 6;
3995 adev->mode_info.num_dig = 7;
3998 adev->mode_info.num_crtc = 3;
3999 adev->mode_info.num_hpd = 6;
4000 adev->mode_info.num_dig = 9;
4003 adev->mode_info.num_crtc = 2;
4004 adev->mode_info.num_hpd = 6;
4005 adev->mode_info.num_dig = 9;
4007 case CHIP_POLARIS11:
4008 case CHIP_POLARIS12:
4009 adev->mode_info.num_crtc = 5;
4010 adev->mode_info.num_hpd = 5;
4011 adev->mode_info.num_dig = 5;
4013 case CHIP_POLARIS10:
4015 adev->mode_info.num_crtc = 6;
4016 adev->mode_info.num_hpd = 6;
4017 adev->mode_info.num_dig = 6;
4022 adev->mode_info.num_crtc = 6;
4023 adev->mode_info.num_hpd = 6;
4024 adev->mode_info.num_dig = 6;
4026 #if defined(CONFIG_DRM_AMD_DC_DCN)
4030 adev->mode_info.num_crtc = 4;
4031 adev->mode_info.num_hpd = 4;
4032 adev->mode_info.num_dig = 4;
4036 case CHIP_SIENNA_CICHLID:
4037 case CHIP_NAVY_FLOUNDER:
4038 adev->mode_info.num_crtc = 6;
4039 adev->mode_info.num_hpd = 6;
4040 adev->mode_info.num_dig = 6;
4043 case CHIP_DIMGREY_CAVEFISH:
4044 adev->mode_info.num_crtc = 5;
4045 adev->mode_info.num_hpd = 5;
4046 adev->mode_info.num_dig = 5;
4048 case CHIP_BEIGE_GOBY:
4049 adev->mode_info.num_crtc = 2;
4050 adev->mode_info.num_hpd = 2;
4051 adev->mode_info.num_dig = 2;
4055 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4059 amdgpu_dm_set_irq_funcs(adev);
4061 if (adev->mode_info.funcs == NULL)
4062 adev->mode_info.funcs = &dm_display_funcs;
4065 * Note: Do NOT change adev->audio_endpt_rreg and
4066 * adev->audio_endpt_wreg because they are initialised in
4067 * amdgpu_device_init()
4069 #if defined(CONFIG_DEBUG_KERNEL_DC)
4071 adev_to_drm(adev)->dev,
4072 &dev_attr_s3_debug);
4078 static bool modeset_required(struct drm_crtc_state *crtc_state,
4079 struct dc_stream_state *new_stream,
4080 struct dc_stream_state *old_stream)
4082 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4085 static bool modereset_required(struct drm_crtc_state *crtc_state)
4087 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
/*
 * drm_encoder_funcs.destroy: unregister the encoder from DRM core.
 * NOTE(review): the kfree(encoder) line appears missing from this
 * extract — verify against upstream to rule out a leak.
 */
4090 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4092 drm_encoder_cleanup(encoder);
4096 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4097 .destroy = amdgpu_dm_encoder_destroy,
/*
 * Look up the per-format scaling limits (in 1/1000 units) from the DC plane
 * caps: NV12-class video formats, FP16 formats, and everything else
 * (ARGB8888 caps). A cap value of 1 means "no scaling allowed" and is
 * normalized to 1000 (scale factor 1.0).
 * NOTE(review): break statements between the switch arms are missing from
 * this extract.
 */
4101 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4102 struct drm_framebuffer *fb,
4103 int *min_downscale, int *max_upscale)
4105 struct amdgpu_device *adev = drm_to_adev(dev);
4106 struct dc *dc = adev->dm.dc;
4107 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4108 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4110 switch (fb->format->format) {
4111 case DRM_FORMAT_P010:
4112 case DRM_FORMAT_NV12:
4113 case DRM_FORMAT_NV21:
4114 *max_upscale = plane_cap->max_upscale_factor.nv12;
4115 *min_downscale = plane_cap->max_downscale_factor.nv12;
4118 case DRM_FORMAT_XRGB16161616F:
4119 case DRM_FORMAT_ARGB16161616F:
4120 case DRM_FORMAT_XBGR16161616F:
4121 case DRM_FORMAT_ABGR16161616F:
4122 *max_upscale = plane_cap->max_upscale_factor.fp16;
4123 *min_downscale = plane_cap->max_downscale_factor.fp16;
/* default: all remaining formats use the argb8888 caps. */
4127 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4128 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4133 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4134 * scaling factor of 1.0 == 1000 units.
4136 if (*max_upscale == 1)
4137 *max_upscale = 1000;
4139 if (*min_downscale == 1)
4140 *min_downscale = 1000;
/*
 * Translate a DRM plane state (16.16 fixed-point src rect, integer dst
 * rect) into DC's dc_scaling_info, validating non-zero dimensions and the
 * per-format scaling limits. Returns 0 on success; the error-return lines
 * (-EINVAL paths) are missing from this extract.
 */
4144 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4145 struct dc_scaling_info *scaling_info)
4147 int scale_w, scale_h, min_downscale, max_upscale;
4149 memset(scaling_info, 0, sizeof(*scaling_info));
4151 /* Source is fixed 16.16 but we ignore mantissa for now... */
4152 scaling_info->src_rect.x = state->src_x >> 16;
4153 scaling_info->src_rect.y = state->src_y >> 16;
4156 * For reasons we don't (yet) fully understand a non-zero
4157 * src_y coordinate into an NV12 buffer can cause a
4158 * system hang. To avoid hangs (and maybe be overly cautious)
4159 * let's reject both non-zero src_x and src_y.
4161 * We currently know of only one use-case to reproduce a
4162 * scenario with non-zero src_x and src_y for NV12, which
4163 * is to gesture the YouTube Android app into full screen
4167 state->fb->format->format == DRM_FORMAT_NV12 &&
4168 (scaling_info->src_rect.x != 0 ||
4169 scaling_info->src_rect.y != 0))
4172 scaling_info->src_rect.width = state->src_w >> 16;
4173 if (scaling_info->src_rect.width == 0)
4176 scaling_info->src_rect.height = state->src_h >> 16;
4177 if (scaling_info->src_rect.height == 0)
4180 scaling_info->dst_rect.x = state->crtc_x;
4181 scaling_info->dst_rect.y = state->crtc_y;
4183 if (state->crtc_w == 0)
4186 scaling_info->dst_rect.width = state->crtc_w;
4188 if (state->crtc_h == 0)
4191 scaling_info->dst_rect.height = state->crtc_h;
4193 /* DRM doesn't specify clipping on destination output. */
4194 scaling_info->clip_rect = scaling_info->dst_rect;
4196 /* Validate scaling per-format with DC plane caps */
4197 if (state->plane && state->plane->dev && state->fb) {
4198 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4199 &min_downscale, &max_upscale);
/* else: fallback limits (0.25x .. 16x) when no fb/plane available. */
4201 min_downscale = 250;
4202 max_upscale = 16000;
/* Scale factors in 1/1000 units, compared against the caps range. */
4205 scale_w = scaling_info->dst_rect.width * 1000 /
4206 scaling_info->src_rect.width;
4208 if (scale_w < min_downscale || scale_w > max_upscale)
4211 scale_h = scaling_info->dst_rect.height * 1000 /
4212 scaling_info->src_rect.height;
4214 if (scale_h < min_downscale || scale_h > max_upscale)
4218 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4219 * assume reasonable defaults based on the format.
/*
 * Decode pre-GFX9 (VI-era) AMDGPU_TILING flags into DC's gfx8 tiling info:
 * 2D macro-tiled surfaces get full bank/aspect/split parameters, 1D tiled
 * surfaces just the array mode; pipe_config is always copied.
 */
4226 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4227 uint64_t tiling_flags)
4229 /* Fill GFX8 params */
4230 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4231 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4233 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4234 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4235 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4236 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4237 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4239 /* XXX fix me for VI */
4240 tiling_info->gfx8.num_banks = num_banks;
4241 tiling_info->gfx8.array_mode =
4242 DC_ARRAY_2D_TILED_THIN1;
4243 tiling_info->gfx8.tile_split = tile_split;
4244 tiling_info->gfx8.bank_width = bankw;
4245 tiling_info->gfx8.bank_height = bankh;
4246 tiling_info->gfx8.tile_aspect = mtaspect;
4247 tiling_info->gfx8.tile_mode =
4248 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4249 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4250 == DC_ARRAY_1D_TILED_THIN1) {
4251 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4254 tiling_info->gfx8.pipe_config =
4255 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
/*
 * Populate DC's gfx9 tiling info straight from the device's
 * gb_addr_config fields (pipes, banks, interleave, SEs, RBs). GFX10.3-class
 * ASICs and Van Gogh additionally report the packer count (num_pkrs).
 */
4259 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4260 union dc_tiling_info *tiling_info)
4262 tiling_info->gfx9.num_pipes =
4263 adev->gfx.config.gb_addr_config_fields.num_pipes;
4264 tiling_info->gfx9.num_banks =
4265 adev->gfx.config.gb_addr_config_fields.num_banks;
4266 tiling_info->gfx9.pipe_interleave =
4267 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4268 tiling_info->gfx9.num_shader_engines =
4269 adev->gfx.config.gb_addr_config_fields.num_se;
4270 tiling_info->gfx9.max_compressed_frags =
4271 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4272 tiling_info->gfx9.num_rb_per_se =
4273 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4274 tiling_info->gfx9.shaderEnable = 1;
4275 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4276 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4277 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4278 adev->asic_type == CHIP_BEIGE_GOBY ||
4279 adev->asic_type == CHIP_VANGOGH)
4280 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
/*
 * Ask DC whether the surface described by (format, tiling, size) supports
 * DCC compression and whether the requested dcc params are compatible with
 * the hardware's capability (e.g. independent 64B block requirement).
 * Early-outs: video formats and missing get_dcc_compression_cap hook.
 * NOTE(review): return statements are missing from this extract.
 */
4284 validate_dcc(struct amdgpu_device *adev,
4285 const enum surface_pixel_format format,
4286 const enum dc_rotation_angle rotation,
4287 const union dc_tiling_info *tiling_info,
4288 const struct dc_plane_dcc_param *dcc,
4289 const struct dc_plane_address *address,
4290 const struct plane_size *plane_size)
4292 struct dc *dc = adev->dm.dc;
4293 struct dc_dcc_surface_param input;
4294 struct dc_surface_dcc_cap output;
4296 memset(&input, 0, sizeof(input));
4297 memset(&output, 0, sizeof(output));
4302 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4303 !dc->cap_funcs.get_dcc_compression_cap)
4306 input.format = format;
4307 input.surface_size.width = plane_size->surface_size.width;
4308 input.surface_size.height = plane_size->surface_size.height;
4309 input.swizzle_mode = tiling_info->gfx9.swizzle;
4311 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4312 input.scan = SCAN_DIRECTION_HORIZONTAL;
4313 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4314 input.scan = SCAN_DIRECTION_VERTICAL;
4316 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4319 if (!output.capable)
/* Hardware requires independent 64B blocks but request doesn't use them. */
4322 if (dcc->independent_64b_blks == 0 &&
4323 output.grph.rgb.independent_64b_blks != 0)
/* True iff @modifier is an AMD format modifier with its DCC bit set. */
4330 modifier_has_dcc(uint64_t modifier)
4332 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
/*
 * Extract the GFX9 swizzle mode encoded in an AMD format modifier.
 * LINEAR has no swizzle (the `return 0;` line is missing in this extract).
 */
4336 modifier_gfx9_swizzle_mode(uint64_t modifier)
4338 if (modifier == DRM_FORMAT_MOD_LINEAR)
4341 return AMD_FMT_MOD_GET(TILE, modifier);
4344 static const struct drm_format_info *
4345 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4347 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
/*
 * Populate gfx9 tiling info from the device defaults, then override the
 * pipe/SE/packer/bank topology with the values encoded in an AMD format
 * modifier. Non-AMD modifiers keep the device defaults unchanged.
 * NOTE(review): the `uint64_t modifier` parameter line is missing from
 * this extract.
 */
4351 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4352 union dc_tiling_info *tiling_info,
4355 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4356 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4357 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
/* Pipes are capped at 2^4; the remaining XOR bits map to shader engines. */
4358 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4360 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4362 if (!IS_AMD_FMT_MOD(modifier))
4365 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4366 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4368 if (adev->family >= AMDGPU_FAMILY_NV) {
4369 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4371 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4373 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
/*
 * Low two bits of the GFX9 swizzle mode: micro-tile ordering class.
 * NOTE(review): the rotated (R) entry appears missing from this extract.
 */
4377 enum dm_micro_swizzle {
4378 MICRO_SWIZZLE_Z = 0,
4379 MICRO_SWIZZLE_S = 1,
4380 MICRO_SWIZZLE_D = 2,
/*
 * drm_plane_funcs.format_mod_supported hook: accept LINEAR/INVALID always,
 * then require the modifier to be on the plane's advertised list and
 * filter out bpp/micro-swizzle combinations the display hardware cannot
 * scan out (D swizzle constraints, DCC restricted to 32bpp single-plane).
 * NOTE(review): several return lines are missing from this extract.
 */
4384 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4388 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4389 const struct drm_format_info *info = drm_format_info(format);
4392 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4398 * We always have to allow these modifiers:
4399 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4400 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4402 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4403 modifier == DRM_FORMAT_MOD_INVALID) {
4407 /* Check that the modifier is on the list of the plane's supported modifiers. */
4408 for (i = 0; i < plane->modifier_count; i++) {
4409 if (modifier == plane->modifiers[i])
4412 if (i == plane->modifier_count)
4416 * For D swizzle the canonical modifier depends on the bpp, so check
4419 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4420 adev->family >= AMDGPU_FAMILY_NV) {
4421 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4425 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4429 if (modifier_has_dcc(modifier)) {
4430 /* Per radeonsi comments 16/64 bpp are more complicated. */
4431 if (info->cpp[0] != 4)
4433 /* We support multi-planar formats, but not when combined with
4434 * additional DCC metadata planes. */
4435 if (info->num_planes > 1)
/*
 * Append @mod to the dynamically-grown modifier array, doubling *cap with
 * a kmalloc+memcpy when full. NOTE(review): the kfree of the old buffer
 * and the OOM handling lines are missing from this extract — verify the
 * realloc-style error path upstream.
 */
4443 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4448 if (*cap - *size < 1) {
4449 uint64_t new_cap = *cap * 2;
4450 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4458 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4464 (*mods)[*size] = mod;
/*
 * Build the GFX9 (Vega/Raven) modifier list, most-preferred first:
 * DCC variants (with/without constant encode and retile) for Raven-family,
 * then plain 64K_D_X/64K_S_X XOR modes, then non-XOR 64K_D/64K_S fallbacks.
 * The _D entries are 64bpp-only and filtered later in
 * dm_plane_format_mod_supported().
 */
4469 add_gfx9_modifiers(const struct amdgpu_device *adev,
4470 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4472 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4473 int pipe_xor_bits = min(8, pipes +
4474 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4475 int bank_xor_bits = min(8 - pipe_xor_bits,
4476 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4477 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4478 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4481 if (adev->family == AMDGPU_FAMILY_RV) {
4482 /* Raven2 and later */
4483 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4486 * No _D DCC swizzles yet because we only allow 32bpp, which
4487 * doesn't support _D on DCN
4490 if (has_constant_encode) {
4491 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4492 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4493 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4494 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4495 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4496 AMD_FMT_MOD_SET(DCC, 1) |
4497 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4498 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4499 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4502 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4503 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4504 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4505 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4506 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4507 AMD_FMT_MOD_SET(DCC, 1) |
4508 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4509 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4510 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
/* DCC_RETILE variants carry the RB/PIPE layout for the displayable copy. */
4512 if (has_constant_encode) {
4513 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4514 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4515 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4516 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4517 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4518 AMD_FMT_MOD_SET(DCC, 1) |
4519 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4520 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4521 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4523 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4524 AMD_FMT_MOD_SET(RB, rb) |
4525 AMD_FMT_MOD_SET(PIPE, pipes));
4528 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4529 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4530 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4531 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4532 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4533 AMD_FMT_MOD_SET(DCC, 1) |
4534 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4535 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4536 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4537 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4538 AMD_FMT_MOD_SET(RB, rb) |
4539 AMD_FMT_MOD_SET(PIPE, pipes));
4543 * Only supported for 64bpp on Raven, will be filtered on format in
4544 * dm_plane_format_mod_supported.
4546 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4547 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4548 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4549 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4550 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4552 if (adev->family == AMDGPU_FAMILY_RV) {
4553 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4554 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4555 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4556 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4557 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4561 * Only supported for 64bpp on Raven, will be filtered on format in
4562 * dm_plane_format_mod_supported.
4564 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4565 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4566 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4568 if (adev->family == AMDGPU_FAMILY_RV) {
4569 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4570 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4571 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/*
 * Build the GFX10.1 (Navi1x) modifier list: 64K_R_X with DCC (direct and
 * retiled), then plain 64K_R_X / 64K_S_X, then GFX9-compatible non-XOR
 * fallbacks. 64K_D is 64bpp-only and filtered later.
 */
4576 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4577 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4579 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4581 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4582 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4583 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4584 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4585 AMD_FMT_MOD_SET(DCC, 1) |
4586 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4587 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4588 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4590 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4591 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4592 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4593 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4594 AMD_FMT_MOD_SET(DCC, 1) |
4595 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4596 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4597 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4598 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4600 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4601 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4602 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4603 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4605 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4606 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4607 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4608 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4611 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4612 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4613 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4614 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4616 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4617 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4618 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/*
 * Build the GFX10.3 (Sienna Cichlid and friends, RB+) modifier list:
 * like GFX10.1 but the modifiers additionally encode the packer count and
 * DCC requires independent 128B blocks alongside 64B.
 */
4622 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4623 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4625 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4626 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4628 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4629 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4630 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4631 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4632 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4633 AMD_FMT_MOD_SET(DCC, 1) |
4634 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4635 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4636 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4637 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4639 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4640 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4641 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4642 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4643 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4644 AMD_FMT_MOD_SET(DCC, 1) |
4645 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4646 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4647 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4648 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4649 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4651 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4652 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4653 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4654 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4655 AMD_FMT_MOD_SET(PACKERS, pkrs));
4657 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4658 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4659 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4660 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4661 AMD_FMT_MOD_SET(PACKERS, pkrs));
4663 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4664 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4665 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4666 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4668 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4669 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4670 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/*
 * Allocate and populate the modifier list advertised for a DRM plane.
 * Cursor planes only get LINEAR; otherwise the list is chosen per GPU
 * family, always terminated by DRM_FORMAT_MOD_INVALID.
 * On success *mods is a kmalloc'd array the caller owns; pre-GFX9
 * families get no list at all (modifiers not hooked up).
 */
4674 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4676 uint64_t size = 0, capacity = 128;
4679 /* We have not hooked up any pre-GFX9 modifiers. */
4680 if (adev->family < AMDGPU_FAMILY_AI)
/*
 * NOTE(review): the kmalloc result is not checked here before the
 * add_modifier() calls below — presumably add_modifier tolerates a
 * NULL *mods, but confirm against its definition.
 */
4683 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4685 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4686 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4687 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4688 return *mods ? 0 : -ENOMEM;
4691 switch (adev->family) {
4692 case AMDGPU_FAMILY_AI:
4693 case AMDGPU_FAMILY_RV:
4694 add_gfx9_modifiers(adev, mods, &size, &capacity);
4696 case AMDGPU_FAMILY_NV:
4697 case AMDGPU_FAMILY_VGH:
4698 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4699 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4701 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4705 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4707 /* INVALID marks the end of the list. */
4708 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
/*
 * Derive GFX9+ tiling info, swizzle mode and (when the modifier carries
 * DCC) the DCC parameters/metadata address for a framebuffer, then
 * validate the resulting DCC configuration against DC.
 * The DCC metadata plane is plane 1 of the framebuffer (offsets[1],
 * pitches[1]); force_disable_dcc suppresses DCC regardless of modifier.
 */
4717 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4718 const struct amdgpu_framebuffer *afb,
4719 const enum surface_pixel_format format,
4720 const enum dc_rotation_angle rotation,
4721 const struct plane_size *plane_size,
4722 union dc_tiling_info *tiling_info,
4723 struct dc_plane_dcc_param *dcc,
4724 struct dc_plane_address *address,
4725 const bool force_disable_dcc)
4727 const uint64_t modifier = afb->base.modifier;
4730 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4731 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4733 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
/* DCC metadata lives in the framebuffer's second plane. */
4734 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4737 dcc->meta_pitch = afb->base.pitches[1];
4738 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4740 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4741 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4744 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
/*
 * Translate a DRM framebuffer into DC buffer attributes: surface size,
 * pitch, tiling, DCC and GPU addresses. RGB formats use the single
 * graphics address; YUV formats (>= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
 * use separate luma/chroma addresses from fb planes 0 and 1.
 * GFX9+ derives tiling from the modifier; older ASICs from tiling_flags.
 */
4752 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4753 const struct amdgpu_framebuffer *afb,
4754 const enum surface_pixel_format format,
4755 const enum dc_rotation_angle rotation,
4756 const uint64_t tiling_flags,
4757 union dc_tiling_info *tiling_info,
4758 struct plane_size *plane_size,
4759 struct dc_plane_dcc_param *dcc,
4760 struct dc_plane_address *address,
4762 bool force_disable_dcc)
4764 const struct drm_framebuffer *fb = &afb->base;
/* Start from a clean slate; callers rely on unset fields being zero. */
4767 memset(tiling_info, 0, sizeof(*tiling_info));
4768 memset(plane_size, 0, sizeof(*plane_size));
4769 memset(dcc, 0, sizeof(*dcc));
4770 memset(address, 0, sizeof(*address));
4772 address->tmz_surface = tmz_surface;
4774 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
/* Packed RGB: one plane, pitch converted from bytes to pixels. */
4775 uint64_t addr = afb->address + fb->offsets[0];
4777 plane_size->surface_size.x = 0;
4778 plane_size->surface_size.y = 0;
4779 plane_size->surface_size.width = fb->width;
4780 plane_size->surface_size.height = fb->height;
4781 plane_size->surface_pitch =
4782 fb->pitches[0] / fb->format->cpp[0];
4784 address->type = PLN_ADDR_TYPE_GRAPHICS;
4785 address->grph.addr.low_part = lower_32_bits(addr);
4786 address->grph.addr.high_part = upper_32_bits(addr);
4787 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
/* Planar YUV: luma in plane 0, chroma in plane 1 (4:2:0 subsampled). */
4788 uint64_t luma_addr = afb->address + fb->offsets[0];
4789 uint64_t chroma_addr = afb->address + fb->offsets[1];
4791 plane_size->surface_size.x = 0;
4792 plane_size->surface_size.y = 0;
4793 plane_size->surface_size.width = fb->width;
4794 plane_size->surface_size.height = fb->height;
4795 plane_size->surface_pitch =
4796 fb->pitches[0] / fb->format->cpp[0];
4798 plane_size->chroma_size.x = 0;
4799 plane_size->chroma_size.y = 0;
4800 /* TODO: set these based on surface format */
4801 plane_size->chroma_size.width = fb->width / 2;
4802 plane_size->chroma_size.height = fb->height / 2;
4804 plane_size->chroma_pitch =
4805 fb->pitches[1] / fb->format->cpp[1];
4807 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4808 address->video_progressive.luma_addr.low_part =
4809 lower_32_bits(luma_addr);
4810 address->video_progressive.luma_addr.high_part =
4811 upper_32_bits(luma_addr);
4812 address->video_progressive.chroma_addr.low_part =
4813 lower_32_bits(chroma_addr);
4814 address->video_progressive.chroma_addr.high_part =
4815 upper_32_bits(chroma_addr);
4818 if (adev->family >= AMDGPU_FAMILY_AI) {
4819 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4820 rotation, plane_size,
4827 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
/*
 * Derive DC blending parameters from a DRM plane state.
 * Defaults: no per-pixel alpha, no global alpha, opaque (0xff).
 * Only overlay planes blend; per-pixel alpha is enabled for
 * premultiplied blend mode with an alpha-capable format, and global
 * alpha is taken from the 16-bit plane alpha property (scaled to 8-bit).
 */
4834 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4835 bool *per_pixel_alpha, bool *global_alpha,
4836 int *global_alpha_value)
4838 *per_pixel_alpha = false;
4839 *global_alpha = false;
4840 *global_alpha_value = 0xff;
4842 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4845 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4846 static const uint32_t alpha_formats[] = {
4847 DRM_FORMAT_ARGB8888,
4848 DRM_FORMAT_RGBA8888,
4849 DRM_FORMAT_ABGR8888,
4851 uint32_t format = plane_state->fb->format->format;
4854 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4855 if (format == alpha_formats[i]) {
4856 *per_pixel_alpha = true;
/* plane_state->alpha is 16-bit; 0xffff means fully opaque. */
4862 if (plane_state->alpha < 0xffff) {
4863 *global_alpha = true;
4864 *global_alpha_value = plane_state->alpha >> 8;
/*
 * Map the DRM color-encoding/color-range plane properties to a DC
 * color space. RGB surfaces always get sRGB; YUV surfaces select
 * BT.601/BT.709/BT.2020 with a LIMITED variant for limited range.
 */
4869 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4870 const enum surface_pixel_format format,
4871 enum dc_color_space *color_space)
4875 *color_space = COLOR_SPACE_SRGB;
4877 /* DRM color properties only affect non-RGB formats. */
4878 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4881 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4883 switch (plane_state->color_encoding) {
4884 case DRM_COLOR_YCBCR_BT601:
4886 *color_space = COLOR_SPACE_YCBCR601;
4888 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4891 case DRM_COLOR_YCBCR_BT709:
4893 *color_space = COLOR_SPACE_YCBCR709;
4895 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4898 case DRM_COLOR_YCBCR_BT2020:
/* NOTE(review): no LIMITED variant is selected for BT.2020 here. */
4900 *color_space = COLOR_SPACE_2020_YCBCR;
/*
 * Populate a dc_plane_info and dc_plane_address from a DRM plane state:
 * pixel format, rotation, color space, buffer/tiling/DCC attributes and
 * blending. Returns an error for unsupported fourcc formats.
 * Note: NV21 maps to ..._YCbCr and NV12 to ..._YCrCb — the apparent
 * swap matches the DC enum naming, not a bug.
 */
4913 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4914 const struct drm_plane_state *plane_state,
4915 const uint64_t tiling_flags,
4916 struct dc_plane_info *plane_info,
4917 struct dc_plane_address *address,
4919 bool force_disable_dcc)
4921 const struct drm_framebuffer *fb = plane_state->fb;
4922 const struct amdgpu_framebuffer *afb =
4923 to_amdgpu_framebuffer(plane_state->fb);
4926 memset(plane_info, 0, sizeof(*plane_info));
/* Translate DRM fourcc -> DC surface pixel format. */
4928 switch (fb->format->format) {
4930 plane_info->format =
4931 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4933 case DRM_FORMAT_RGB565:
4934 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4936 case DRM_FORMAT_XRGB8888:
4937 case DRM_FORMAT_ARGB8888:
4938 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4940 case DRM_FORMAT_XRGB2101010:
4941 case DRM_FORMAT_ARGB2101010:
4942 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4944 case DRM_FORMAT_XBGR2101010:
4945 case DRM_FORMAT_ABGR2101010:
4946 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4948 case DRM_FORMAT_XBGR8888:
4949 case DRM_FORMAT_ABGR8888:
4950 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4952 case DRM_FORMAT_NV21:
4953 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4955 case DRM_FORMAT_NV12:
4956 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4958 case DRM_FORMAT_P010:
4959 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4961 case DRM_FORMAT_XRGB16161616F:
4962 case DRM_FORMAT_ARGB16161616F:
4963 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4965 case DRM_FORMAT_XBGR16161616F:
4966 case DRM_FORMAT_ABGR16161616F:
4967 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4969 case DRM_FORMAT_XRGB16161616:
4970 case DRM_FORMAT_ARGB16161616:
4971 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4973 case DRM_FORMAT_XBGR16161616:
4974 case DRM_FORMAT_ABGR16161616:
4975 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4979 "Unsupported screen format %p4cc\n",
4980 &fb->format->format);
/* Translate DRM rotation property -> DC rotation angle. */
4984 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4985 case DRM_MODE_ROTATE_0:
4986 plane_info->rotation = ROTATION_ANGLE_0;
4988 case DRM_MODE_ROTATE_90:
4989 plane_info->rotation = ROTATION_ANGLE_90;
4991 case DRM_MODE_ROTATE_180:
4992 plane_info->rotation = ROTATION_ANGLE_180;
4994 case DRM_MODE_ROTATE_270:
4995 plane_info->rotation = ROTATION_ANGLE_270;
4998 plane_info->rotation = ROTATION_ANGLE_0;
5002 plane_info->visible = true;
5003 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5005 plane_info->layer_index = 0;
5007 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5008 &plane_info->color_space);
5012 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5013 plane_info->rotation, tiling_flags,
5014 &plane_info->tiling_info,
5015 &plane_info->plane_size,
5016 &plane_info->dcc, address, tmz_surface,
5021 fill_blending_from_plane_state(
5022 plane_state, &plane_info->per_pixel_alpha,
5023 &plane_info->global_alpha, &plane_info->global_alpha_value);
/*
 * Fill a dc_plane_state from a DRM plane/CRTC state pair: scaling
 * rectangles, then format/address/tiling/blending via
 * fill_dc_plane_info_and_addr(), and finally the input transfer
 * function for color management. DCC is force-disabled on Raven
 * during suspend.
 *
 * Fix: the original assigned dc_plane_state->format twice (lines
 * 5059 and 5061); the redundant second assignment is removed.
 */
5028 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5029 struct dc_plane_state *dc_plane_state,
5030 struct drm_plane_state *plane_state,
5031 struct drm_crtc_state *crtc_state)
5033 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5034 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5035 struct dc_scaling_info scaling_info;
5036 struct dc_plane_info plane_info;
5038 bool force_disable_dcc = false;
5040 ret = fill_dc_scaling_info(plane_state, &scaling_info);
5044 dc_plane_state->src_rect = scaling_info.src_rect;
5045 dc_plane_state->dst_rect = scaling_info.dst_rect;
5046 dc_plane_state->clip_rect = scaling_info.clip_rect;
5047 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
/* Raven cannot keep DCC enabled across suspend. */
5049 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5050 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5053 &dc_plane_state->address,
5059 dc_plane_state->format = plane_info.format;
5060 dc_plane_state->color_space = plane_info.color_space;
5062 dc_plane_state->plane_size = plane_info.plane_size;
5063 dc_plane_state->rotation = plane_info.rotation;
5064 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5065 dc_plane_state->stereo_format = plane_info.stereo_format;
5066 dc_plane_state->tiling_info = plane_info.tiling_info;
5067 dc_plane_state->visible = plane_info.visible;
5068 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5069 dc_plane_state->global_alpha = plane_info.global_alpha;
5070 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5071 dc_plane_state->dcc = plane_info.dcc;
5072 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5073 dc_plane_state->flip_int_enabled = true;
5076 * Always set input transfer function, since plane state is refreshed
5079 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
/*
 * Compute the stream's source (viewport) and destination rectangles
 * from the requested mode and the connector's scaling/underscan
 * properties. RMX_ASPECT/RMX_OFF preserve the aspect ratio by shrinking
 * one destination dimension; RMX_CENTER centers without scaling; the
 * underscan borders then inset the destination rectangle.
 */
5086 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5087 const struct dm_connector_state *dm_state,
5088 struct dc_stream_state *stream)
5090 enum amdgpu_rmx_type rmx_type;
5092 struct rect src = { 0 }; /* viewport in composition space*/
5093 struct rect dst = { 0 }; /* stream addressable area */
5095 /* no mode. nothing to be done */
5099 /* Full screen scaling by default */
5100 src.width = mode->hdisplay;
5101 src.height = mode->vdisplay;
5102 dst.width = stream->timing.h_addressable;
5103 dst.height = stream->timing.v_addressable;
5106 rmx_type = dm_state->scaling;
5107 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5108 if (src.width * dst.height <
5109 src.height * dst.width) {
5110 /* height needs less upscaling/more downscaling */
5111 dst.width = src.width *
5112 dst.height / src.height;
5114 /* width needs less upscaling/more downscaling */
5115 dst.height = src.height *
5116 dst.width / src.width;
5118 } else if (rmx_type == RMX_CENTER) {
/* Center the (possibly shrunk) destination in the addressable area. */
5122 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5123 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5125 if (dm_state->underscan_enable) {
5126 dst.x += dm_state->underscan_hborder / 2;
5127 dst.y += dm_state->underscan_vborder / 2;
5128 dst.width -= dm_state->underscan_hborder;
5129 dst.height -= dm_state->underscan_vborder;
5136 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5137 dst.x, dst.y, dst.width, dst.height);
/*
 * Pick a DC color depth from the connector's EDID-reported bpc,
 * capped by the HDMI 2.0 HF-VSDB deep-color caps for YCbCr 4:2:0
 * modes and by the user's requested max bpc. bpc defaults to 8 when
 * the EDID reports none, and is rounded down to an even value.
 */
5141 static enum dc_color_depth
5142 convert_color_depth_from_display_info(const struct drm_connector *connector,
5143 bool is_y420, int requested_bpc)
5150 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5151 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5153 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5155 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5158 bpc = (uint8_t)connector->display_info.bpc;
5159 /* Assume 8 bpc by default if no bpc is specified. */
5160 bpc = bpc ? bpc : 8;
5163 if (requested_bpc > 0) {
5165 * Cap display bpc based on the user requested value.
5167 * The value for state->max_bpc may not correctly updated
5168 * depending on when the connector gets added to the state
5169 * or if this was called outside of atomic check, so it
5170 * can't be used directly.
5172 bpc = min_t(u8, bpc, requested_bpc);
5174 /* Round down to the nearest even number. */
5175 bpc = bpc - (bpc & 1);
5181 * Temporary Work around, DRM doesn't parse color depth for
5182 * EDID revision before 1.4
5183 * TODO: Fix edid parsing
5185 return COLOR_DEPTH_888;
/* Map bpc (6/8/10/12/14/16) to the corresponding DC color depth. */
5187 return COLOR_DEPTH_666;
5189 return COLOR_DEPTH_888;
5191 return COLOR_DEPTH_101010;
5193 return COLOR_DEPTH_121212;
5195 return COLOR_DEPTH_141414;
5197 return COLOR_DEPTH_161616;
5199 return COLOR_DEPTH_UNDEFINED;
/* Convert DRM picture aspect ratio to the DC enum (values match by design). */
5203 static enum dc_aspect_ratio
5204 get_aspect_ratio(const struct drm_display_mode *mode_in)
5206 /* 1-1 mapping, since both enums follow the HDMI spec. */
5207 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
/*
 * Choose the stream output color space from the CRTC timing:
 * YCbCr encodings pick BT.709 above the 270.30 MHz HDTV/SDTV pixel
 * clock threshold and BT.601 below it (LIMITED when Y_ONLY is set);
 * RGB and everything else defaults to sRGB.
 */
5210 static enum dc_color_space
5211 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5213 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5215 switch (dc_crtc_timing->pixel_encoding) {
5216 case PIXEL_ENCODING_YCBCR422:
5217 case PIXEL_ENCODING_YCBCR444:
5218 case PIXEL_ENCODING_YCBCR420:
5221 * 27030khz is the separation point between HDTV and SDTV
5222 * according to HDMI spec, we use YCbCr709 and YCbCr601
5225 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5226 if (dc_crtc_timing->flags.Y_ONLY)
5228 COLOR_SPACE_YCBCR709_LIMITED;
5230 color_space = COLOR_SPACE_YCBCR709;
5232 if (dc_crtc_timing->flags.Y_ONLY)
5234 COLOR_SPACE_YCBCR601_LIMITED;
5236 color_space = COLOR_SPACE_YCBCR601;
5241 case PIXEL_ENCODING_RGB:
5242 color_space = COLOR_SPACE_SRGB;
/*
 * Reduce the timing's color depth until the resulting TMDS clock fits
 * within the sink's max_tmds_clock. The pixel clock is normalized
 * (halved for YCbCr 4:2:0) and scaled by bits-per-component per the
 * HDMI spec (30/24, 36/24, 48/24). Returns true if a depth fits,
 * false once the search drops past COLOR_DEPTH_666.
 */
5253 static bool adjust_colour_depth_from_display_info(
5254 struct dc_crtc_timing *timing_out,
5255 const struct drm_display_info *info)
5257 enum dc_color_depth depth = timing_out->display_color_depth;
5260 normalized_clk = timing_out->pix_clk_100hz / 10;
5261 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5262 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5263 normalized_clk /= 2;
5264 /* Adjusting pix clock following on HDMI spec based on colour depth */
5266 case COLOR_DEPTH_888:
5268 case COLOR_DEPTH_101010:
5269 normalized_clk = (normalized_clk * 30) / 24;
5271 case COLOR_DEPTH_121212:
5272 normalized_clk = (normalized_clk * 36) / 24;
5274 case COLOR_DEPTH_161616:
5275 normalized_clk = (normalized_clk * 48) / 24;
5278 /* The above depths are the only ones valid for HDMI. */
5281 if (normalized_clk <= info->max_tmds_clock) {
5282 timing_out->display_color_depth = depth;
5285 } while (--depth > COLOR_DEPTH_666);
/*
 * Populate the DC stream timing from a DRM display mode and connector:
 * pixel encoding (4:2:0/4:4:4/RGB), color depth, VIC and sync
 * polarities (copied from old_stream when provided), h/v timing
 * (freesync video modes use base-mode fields, otherwise crtc_* fields),
 * aspect ratio, output color space and the sRGB output transfer
 * function. For HDMI, color depth is lowered — falling back to 4:2:0
 * if needed — until the TMDS clock fits the sink.
 */
5289 static void fill_stream_properties_from_drm_display_mode(
5290 struct dc_stream_state *stream,
5291 const struct drm_display_mode *mode_in,
5292 const struct drm_connector *connector,
5293 const struct drm_connector_state *connector_state,
5294 const struct dc_stream_state *old_stream,
5297 struct dc_crtc_timing *timing_out = &stream->timing;
5298 const struct drm_display_info *info = &connector->display_info;
5299 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5300 struct hdmi_vendor_infoframe hv_frame;
5301 struct hdmi_avi_infoframe avi_frame;
5303 memset(&hv_frame, 0, sizeof(hv_frame));
5304 memset(&avi_frame, 0, sizeof(avi_frame));
5306 timing_out->h_border_left = 0;
5307 timing_out->h_border_right = 0;
5308 timing_out->v_border_top = 0;
5309 timing_out->v_border_bottom = 0;
5310 /* TODO: un-hardcode */
5311 if (drm_mode_is_420_only(info, mode_in)
5312 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5313 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5314 else if (drm_mode_is_420_also(info, mode_in)
5315 && aconnector->force_yuv420_output)
5316 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5317 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5318 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5319 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5321 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5323 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5324 timing_out->display_color_depth = convert_color_depth_from_display_info(
5326 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5328 timing_out->scan_type = SCANNING_TYPE_NODATA;
5329 timing_out->hdmi_vic = 0;
/* Reuse VIC and sync polarity from the previous stream when present. */
5332 timing_out->vic = old_stream->timing.vic;
5333 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5334 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5336 timing_out->vic = drm_match_cea_mode(mode_in);
5337 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5338 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5339 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5340 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5343 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5344 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5345 timing_out->vic = avi_frame.video_code;
5346 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5347 timing_out->hdmi_vic = hv_frame.vic;
/* Freesync video modes use the base mode fields, not crtc_* values. */
5350 if (is_freesync_video_mode(mode_in, aconnector)) {
5351 timing_out->h_addressable = mode_in->hdisplay;
5352 timing_out->h_total = mode_in->htotal;
5353 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5354 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5355 timing_out->v_total = mode_in->vtotal;
5356 timing_out->v_addressable = mode_in->vdisplay;
5357 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5358 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5359 timing_out->pix_clk_100hz = mode_in->clock * 10;
5361 timing_out->h_addressable = mode_in->crtc_hdisplay;
5362 timing_out->h_total = mode_in->crtc_htotal;
5363 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5364 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5365 timing_out->v_total = mode_in->crtc_vtotal;
5366 timing_out->v_addressable = mode_in->crtc_vdisplay;
5367 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5368 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5369 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5372 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5374 stream->output_color_space = get_output_color_space(timing_out);
5376 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5377 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5378 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5379 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5380 drm_mode_is_420_also(info, mode_in) &&
5381 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5382 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5383 adjust_colour_depth_from_display_info(timing_out, info);
/*
 * Copy EDID-derived audio capabilities from the DC sink into DC's
 * audio_info: manufacturer/product IDs, display name, the CEA short
 * audio descriptors (CEA rev >= 3 only), speaker allocation and the
 * progressive-mode video/audio latencies.
 */
5388 static void fill_audio_info(struct audio_info *audio_info,
5389 const struct drm_connector *drm_connector,
5390 const struct dc_sink *dc_sink)
5393 int cea_revision = 0;
5394 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5396 audio_info->manufacture_id = edid_caps->manufacturer_id;
5397 audio_info->product_id = edid_caps->product_id;
5399 cea_revision = drm_connector->display_info.cea_rev;
5401 strscpy(audio_info->display_name,
5402 edid_caps->display_name,
5403 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
/* Short audio descriptors are only defined for CEA-861 rev 3+. */
5405 if (cea_revision >= 3) {
5406 audio_info->mode_count = edid_caps->audio_mode_count;
5408 for (i = 0; i < audio_info->mode_count; ++i) {
5409 audio_info->modes[i].format_code =
5410 (enum audio_format_code)
5411 (edid_caps->audio_modes[i].format_code);
5412 audio_info->modes[i].channel_count =
5413 edid_caps->audio_modes[i].channel_count;
5414 audio_info->modes[i].sample_rates.all =
5415 edid_caps->audio_modes[i].sample_rate;
5416 audio_info->modes[i].sample_size =
5417 edid_caps->audio_modes[i].sample_size;
5421 audio_info->flags.all = edid_caps->speaker_flags;
5423 /* TODO: We only check for the progressive mode, check for interlace mode too */
5424 if (drm_connector->latency_present[0]) {
5425 audio_info->video_latency = drm_connector->video_latency[0];
5426 audio_info->audio_latency = drm_connector->audio_latency[0];
5429 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
/*
 * Copy only the crtc_* (hardware) timing fields from src_mode into
 * dst_mode, leaving the user-visible mode fields untouched.
 */
5434 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5435 struct drm_display_mode *dst_mode)
5437 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5438 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5439 dst_mode->crtc_clock = src_mode->crtc_clock;
5440 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5441 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5442 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5443 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5444 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5445 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5446 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5447 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5448 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5449 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5450 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
/*
 * Patch drm_mode's crtc_* timing with the native mode's hardware timing
 * when scaling is enabled, or when the requested mode matches the
 * native mode's clock/htotal/vtotal (amdgpu-inserted common modes).
 * NOTE(review): both visible branches perform the same copy; the split
 * presumably documents the two distinct triggering conditions — confirm
 * no divergence was lost in this excerpt.
 */
5454 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5455 const struct drm_display_mode *native_mode,
5458 if (scale_enabled) {
5459 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5460 } else if (native_mode->clock == drm_mode->clock &&
5461 native_mode->htotal == drm_mode->htotal &&
5462 native_mode->vtotal == drm_mode->vtotal) {
5463 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5465 /* no scaling nor amdgpu inserted, no need to patch */
/*
 * Create a virtual DC sink on the connector's link, used when no real
 * sink is attached (e.g. headless / forced modes). Returns NULL on
 * allocation failure; the sink signal is forced to SIGNAL_TYPE_VIRTUAL.
 */
5469 static struct dc_sink *
5470 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5472 struct dc_sink_init_data sink_init_data = { 0 };
5473 struct dc_sink *sink = NULL;
5474 sink_init_data.link = aconnector->dc_link;
5475 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5477 sink = dc_sink_create(&sink_init_data);
5479 DRM_ERROR("Failed to create sink!\n");
5482 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
/*
 * For a stream participating in multi-display sync, program its CRTC
 * reset trigger from the master stream's VSYNC polarity (rising edge
 * for positive polarity, falling otherwise) with next-pixel delay.
 */
5487 static void set_multisync_trigger_params(
5488 struct dc_stream_state *stream)
5490 struct dc_stream_state *master = NULL;
5492 if (stream->triggered_crtc_reset.enabled) {
5493 master = stream->triggered_crtc_reset.event_source;
5494 stream->triggered_crtc_reset.event =
5495 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5496 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5497 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
/*
 * Among the sync-enabled streams, elect the one with the highest
 * refresh rate (pix_clk / (h_total * v_total)) as master, then point
 * every stream's triggered_crtc_reset.event_source at it.
 */
5501 static void set_master_stream(struct dc_stream_state *stream_set[],
5504 int j, highest_rfr = 0, master_stream = 0;
5506 for (j = 0; j < stream_count; j++) {
5507 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5508 int refresh_rate = 0;
/* pix_clk_100hz * 100 converts to Hz before dividing by total pixels. */
5510 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5511 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5512 if (refresh_rate > highest_rfr) {
5513 highest_rfr = refresh_rate;
5518 for (j = 0; j < stream_count; j++) {
5520 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
/*
 * Enable per-frame CRTC master sync across all streams in the context:
 * requires at least two streams; elects a master via set_master_stream()
 * and then programs each stream's trigger parameters.
 */
5524 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5527 struct dc_stream_state *stream;
5529 if (context->stream_count < 2)
5531 for (i = 0; i < context->stream_count ; i++) {
5532 if (!context->streams[i])
5535 * TODO: add a function to read AMD VSDB bits and set
5536 * crtc_sync_master.multi_sync_enabled flag
5537 * For now it's set to false
5541 set_master_stream(context->streams, context->stream_count);
5543 for (i = 0; i < context->stream_count ; i++) {
5544 stream = context->streams[i];
5549 set_multisync_trigger_params(stream);
5553 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * Reset the stream's DSC flag and, for DisplayPort SST sinks, parse the
 * sink's DPCD DSC capability blobs (basic + branch decoder) into
 * dsc_caps for the policy code to consume.
 */
5554 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5555 struct dc_sink *sink, struct dc_stream_state *stream,
5556 struct dsc_dec_dpcd_caps *dsc_caps)
5558 stream->timing.flags.DSC = 0;
5560 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5561 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5562 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5563 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
/*
 * Decide whether to enable DSC on an SST DisplayPort stream: apply the
 * force-enable debugfs policy, attempt a DSC config computation against
 * the available link bandwidth, and finally apply debugfs overrides for
 * slice counts and bits-per-pixel when DSC ends up enabled.
 */
5568 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5569 struct dc_sink *sink, struct dc_stream_state *stream,
5570 struct dsc_dec_dpcd_caps *dsc_caps)
5572 struct drm_connector *drm_connector = &aconnector->base;
5573 uint32_t link_bandwidth_kbps;
5575 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5576 dc_link_get_link_cap(aconnector->dc_link));
5577 /* Set DSC policy according to dsc_clock_en */
5578 dc_dsc_policy_set_enable_dsc_when_not_needed(
5579 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5581 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
/* DSC is enabled only if a valid config fits the link bandwidth. */
5583 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5585 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5587 link_bandwidth_kbps,
5589 &stream->timing.dsc_cfg)) {
5590 stream->timing.flags.DSC = 1;
5591 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5595 /* Overwrite the stream flag if DSC is enabled through debugfs */
5596 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5597 stream->timing.flags.DSC = 1;
5599 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5600 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5602 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5603 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5605 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5606 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
/*
 * Find (and cache in aconnector->freesync_vid_base) the mode with the
 * highest refresh rate at the preferred mode's resolution, used as the
 * base for freesync video modes. Searches probed_modes or modes per
 * use_probed_modes; falls back to the first listed mode when the EDID
 * declares no preferred mode. Returns the cached copy once computed.
 */
5610 static struct drm_display_mode *
5611 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5612 bool use_probed_modes)
5614 struct drm_display_mode *m, *m_pref = NULL;
5615 u16 current_refresh, highest_refresh;
5616 struct list_head *list_head = use_probed_modes ?
5617 &aconnector->base.probed_modes :
5618 &aconnector->base.modes;
/* Already computed once: return the cached base mode. */
5620 if (aconnector->freesync_vid_base.clock != 0)
5621 return &aconnector->freesync_vid_base;
5623 /* Find the preferred mode */
5624 list_for_each_entry (m, list_head, head) {
5625 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5632 /* Probably an EDID with no preferred mode. Fallback to first entry */
5633 m_pref = list_first_entry_or_null(
5634 &aconnector->base.modes, struct drm_display_mode, head);
5636 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5641 highest_refresh = drm_mode_vrefresh(m_pref);
5644 * Find the mode with highest refresh rate with same resolution.
5645 * For some monitors, preferred mode is not the mode with highest
5646 * supported refresh rate.
5648 list_for_each_entry (m, list_head, head) {
5649 current_refresh = drm_mode_vrefresh(m);
5651 if (m->hdisplay == m_pref->hdisplay &&
5652 m->vdisplay == m_pref->vdisplay &&
5653 highest_refresh < current_refresh) {
5654 highest_refresh = current_refresh;
5659 aconnector->freesync_vid_base = *m_pref;
/*
 * Report whether `mode` is a freesync video mode: identical to the
 * connector's highest-refresh base mode except for a uniform vertical
 * front-porch extension (vtotal and both vsync edges shifted by the
 * same delta). Any other timing difference disqualifies it.
 */
5663 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5664 struct amdgpu_dm_connector *aconnector)
5666 struct drm_display_mode *high_mode;
5669 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5670 if (!high_mode || !mode)
/* Vertical delta must come entirely from the front porch. */
5673 timing_diff = high_mode->vtotal - mode->vtotal;
5675 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5676 high_mode->hdisplay != mode->hdisplay ||
5677 high_mode->vdisplay != mode->vdisplay ||
5678 high_mode->hsync_start != mode->hsync_start ||
5679 high_mode->hsync_end != mode->hsync_end ||
5680 high_mode->htotal != mode->htotal ||
5681 high_mode->hskew != mode->hskew ||
5682 high_mode->vscan != mode->vscan ||
5683 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5684 high_mode->vsync_end - mode->vsync_end != timing_diff)
/*
 * create_stream_for_sink() - build and configure a dc_stream_state for the
 * sink behind @aconnector.
 *
 * @aconnector: DM connector whose sink the stream is created for; if it has
 *              no dc_sink yet a fake sink is created instead.
 * @drm_mode:   requested display mode (copied locally and possibly replaced
 *              by a freesync video mode).
 * @dm_state:   optional DM connector state (supplies scaling/base state).
 * @old_stream: optional previous stream whose vic/polarities are reused when
 *              scaling is on and the refresh rate did not change.
 *
 * Returns the new stream (caller owns a reference). The sink reference taken
 * here is dropped before returning.
 */
5690 static struct dc_stream_state *
5691 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5692 const struct drm_display_mode *drm_mode,
5693 const struct dm_connector_state *dm_state,
5694 const struct dc_stream_state *old_stream,
5697 struct drm_display_mode *preferred_mode = NULL;
5698 struct drm_connector *drm_connector;
5699 const struct drm_connector_state *con_state =
5700 dm_state ? &dm_state->base : NULL;
5701 struct dc_stream_state *stream = NULL;
5702 struct drm_display_mode mode = *drm_mode;
5703 struct drm_display_mode saved_mode;
5704 struct drm_display_mode *freesync_mode = NULL;
5705 bool native_mode_found = false;
5706 bool recalculate_timing = false;
5707 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5709 int preferred_refresh = 0;
5710 #if defined(CONFIG_DRM_AMD_DC_DCN)
5711 struct dsc_dec_dpcd_caps dsc_caps;
5713 struct dc_sink *sink = NULL;
5715 memset(&saved_mode, 0, sizeof(saved_mode));
5717 if (aconnector == NULL) {
5718 DRM_ERROR("aconnector is NULL!\n");
5722 drm_connector = &aconnector->base;
/* No real sink attached (e.g. headless/forced connector): use a fake one. */
5724 if (!aconnector->dc_sink) {
5725 sink = create_fake_sink(aconnector);
5729 sink = aconnector->dc_sink;
5730 dc_sink_retain(sink);
5733 stream = dc_create_stream_for_sink(sink);
5735 if (stream == NULL) {
5736 DRM_ERROR("Failed to create stream for sink!\n");
5740 stream->dm_stream_context = aconnector;
5742 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5743 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5745 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5746 /* Search for preferred mode */
5747 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5748 native_mode_found = true;
/* No preferred mode flagged: fall back to the first mode in the list. */
5752 if (!native_mode_found)
5753 preferred_mode = list_first_entry_or_null(
5754 &aconnector->base.modes,
5755 struct drm_display_mode,
5758 mode_refresh = drm_mode_vrefresh(&mode);
5760 if (preferred_mode == NULL) {
5762 * This may not be an error, the use case is when we have no
5763 * usermode calls to reset and set mode upon hotplug. In this
5764 * case, we call set mode ourselves to restore the previous mode
5765 * and the modelist may not be filled in in time.
5767 DRM_DEBUG_DRIVER("No preferred mode found\n");
5769 recalculate_timing = amdgpu_freesync_vid_mode &&
5770 is_freesync_video_mode(&mode, aconnector);
5771 if (recalculate_timing) {
/* Use the highest-refresh variant of this freesync video mode. */
5772 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5774 mode = *freesync_mode;
5776 decide_crtc_timing_for_drm_display_mode(
5777 &mode, preferred_mode, scale);
5779 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5783 if (recalculate_timing)
5784 drm_mode_set_crtcinfo(&saved_mode, 0);
5786 drm_mode_set_crtcinfo(&mode, 0);
5789 * If scaling is enabled and refresh rate didn't change
5790 * we copy the vic and polarities of the old timings
5792 if (!scale || mode_refresh != preferred_refresh)
5793 fill_stream_properties_from_drm_display_mode(
5794 stream, &mode, &aconnector->base, con_state, NULL,
5797 fill_stream_properties_from_drm_display_mode(
5798 stream, &mode, &aconnector->base, con_state, old_stream,
5801 #if defined(CONFIG_DRM_AMD_DC_DCN)
5802 /* SST DSC determination policy */
5803 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5804 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5805 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5808 update_stream_scaling_settings(&mode, dm_state, stream);
5811 &stream->audio_info,
5815 update_stream_signal(stream, sink);
5817 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5818 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5820 if (stream->link->psr_settings.psr_feature_enabled) {
5822 // should decide stream support vsc sdp colorimetry capability
5823 // before building vsc info packet
5825 stream->use_vsc_sdp_for_colorimetry = false;
5826 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5827 stream->use_vsc_sdp_for_colorimetry =
5828 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5830 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5831 stream->use_vsc_sdp_for_colorimetry = true;
5833 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
/* Drop the reference taken on the (real or fake) sink above. */
5836 dc_sink_release(sink);
/* Release DRM core resources held by the CRTC (drm_crtc_funcs.destroy). */
5841 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5843 drm_crtc_cleanup(crtc);
/*
 * Free a dm_crtc_state: drop the dc_stream reference it holds, then tear
 * down the base drm_crtc_state (drm_crtc_funcs.atomic_destroy_state).
 */
5847 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5848 struct drm_crtc_state *state)
5850 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5852 /* TODO Destroy dc_stream objects are stream object is flattened */
5854 dc_stream_release(cur->stream);
5857 __drm_atomic_helper_crtc_destroy_state(state);
/*
 * Reset the CRTC to a fresh zeroed dm_crtc_state (drm_crtc_funcs.reset),
 * destroying any existing state first.
 */
5863 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5865 struct dm_crtc_state *state;
5868 dm_crtc_destroy_state(crtc, crtc->state);
5870 state = kzalloc(sizeof(*state), GFP_KERNEL);
5871 if (WARN_ON(!state))
5874 __drm_atomic_helper_crtc_reset(crtc, &state->base);
/*
 * Duplicate the current CRTC state for an atomic commit
 * (drm_crtc_funcs.atomic_duplicate_state). Takes an extra reference on the
 * shared dc_stream and copies the DM-specific fields.
 */
5877 static struct drm_crtc_state *
5878 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5880 struct dm_crtc_state *state, *cur;
5882 cur = to_dm_crtc_state(crtc->state);
5884 if (WARN_ON(!crtc->state))
5887 state = kzalloc(sizeof(*state), GFP_KERNEL);
5891 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5894 state->stream = cur->stream;
5895 dc_stream_retain(state->stream);
5898 state->active_planes = cur->active_planes;
5899 state->vrr_infopacket = cur->vrr_infopacket;
5900 state->abm_level = cur->abm_level;
5901 state->vrr_supported = cur->vrr_supported;
5902 state->freesync_config = cur->freesync_config;
5903 state->cm_has_degamma = cur->cm_has_degamma;
5904 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5905 /* TODO Duplicate dc_stream after objects are stream object is flattened */
5907 return &state->base;
5910 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
/* Register the CRTC's debugfs entries once it is fully set up. */
5911 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5913 crtc_debugfs_init(crtc);
/*
 * Enable/disable the VUPDATE interrupt for this CRTC's OTG instance.
 * Returns 0 on success, -EBUSY if DC rejected the interrupt change.
 */
5919 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5921 enum dc_irq_source irq_source;
5922 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5923 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5926 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5928 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5930 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5931 acrtc->crtc_id, enable ? "en" : "dis", rc);
/*
 * Enable/disable the VBLANK interrupt for this CRTC, keeping the VUPDATE
 * interrupt in sync with VRR state, and (on DCN) queuing the MALL/vblank
 * worker with the new state.
 */
5935 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5937 enum dc_irq_source irq_source;
5938 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5939 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5940 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5941 #if defined(CONFIG_DRM_AMD_DC_DCN)
5942 struct amdgpu_display_manager *dm = &adev->dm;
5943 unsigned long flags;
5948 /* vblank irq on -> Only need vupdate irq in vrr mode */
5949 if (amdgpu_dm_vrr_active(acrtc_state))
5950 rc = dm_set_vupdate_irq(crtc, true);
5952 /* vblank irq off -> vupdate irq off */
5953 rc = dm_set_vupdate_irq(crtc, false);
5959 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5961 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
/* During GPU reset, skip the workqueue handoff below. */
5964 if (amdgpu_in_reset(adev))
5967 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Hand the new vblank state to the deferred worker under vblank_lock. */
5968 spin_lock_irqsave(&dm->vblank_lock, flags);
5969 dm->vblank_workqueue->dm = dm;
5970 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5971 dm->vblank_workqueue->enable = enable;
5972 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5973 schedule_work(&dm->vblank_workqueue->mall_work);
/* drm_crtc_funcs.enable_vblank: turn the CRTC's vblank interrupt on. */
5979 static int dm_enable_vblank(struct drm_crtc *crtc)
5981 return dm_set_vblank(crtc, true);
/* drm_crtc_funcs.disable_vblank: turn the CRTC's vblank interrupt off. */
5984 static void dm_disable_vblank(struct drm_crtc *crtc)
5986 dm_set_vblank(crtc, false);
5989 /* Implemented only the options currently available for the driver */
5990 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5991 .reset = dm_crtc_reset_state,
5992 .destroy = amdgpu_dm_crtc_destroy,
5993 .set_config = drm_atomic_helper_set_config,
5994 .page_flip = drm_atomic_helper_page_flip,
5995 .atomic_duplicate_state = dm_crtc_duplicate_state,
5996 .atomic_destroy_state = dm_crtc_destroy_state,
5997 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5998 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5999 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6000 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6001 .enable_vblank = dm_enable_vblank,
6002 .disable_vblank = dm_disable_vblank,
6003 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6004 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6005 .late_register = amdgpu_dm_crtc_late_register,
/*
 * drm_connector_funcs.detect: report connection status based on whether a
 * dc_sink exists (or the user forced the connector on/off).
 */
6009 static enum drm_connector_status
6010 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6013 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6017 * 1. This interface is NOT called in context of HPD irq.
6018 * 2. This interface *is called* in context of user-mode ioctl. Which
6019 * makes it a bad place for *any* MST-related activity.
6022 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6023 !aconnector->fake_enable)
6024 connected = (aconnector->dc_sink != NULL);
6026 connected = (aconnector->base.force == DRM_FORCE_ON);
6028 update_subconnector_property(aconnector);
6030 return (connected ? connector_status_connected :
6031 connector_status_disconnected);
/*
 * drm_connector_funcs.atomic_set_property: store driver-private connector
 * properties (scaling mode, underscan, ABM level) into the DM connector
 * state for the atomic commit.
 */
6034 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6035 struct drm_connector_state *connector_state,
6036 struct drm_property *property,
6039 struct drm_device *dev = connector->dev;
6040 struct amdgpu_device *adev = drm_to_adev(dev);
6041 struct dm_connector_state *dm_old_state =
6042 to_dm_connector_state(connector->state);
6043 struct dm_connector_state *dm_new_state =
6044 to_dm_connector_state(connector_state);
6048 if (property == dev->mode_config.scaling_mode_property) {
6049 enum amdgpu_rmx_type rmx_type;
6052 case DRM_MODE_SCALE_CENTER:
6053 rmx_type = RMX_CENTER;
6055 case DRM_MODE_SCALE_ASPECT:
6056 rmx_type = RMX_ASPECT;
6058 case DRM_MODE_SCALE_FULLSCREEN:
6059 rmx_type = RMX_FULL;
6061 case DRM_MODE_SCALE_NONE:
/* No change requested: nothing to update. */
6067 if (dm_old_state->scaling == rmx_type)
6070 dm_new_state->scaling = rmx_type;
6072 } else if (property == adev->mode_info.underscan_hborder_property) {
6073 dm_new_state->underscan_hborder = val;
6075 } else if (property == adev->mode_info.underscan_vborder_property) {
6076 dm_new_state->underscan_vborder = val;
6078 } else if (property == adev->mode_info.underscan_property) {
6079 dm_new_state->underscan_enable = val;
6081 } else if (property == adev->mode_info.abm_level_property) {
6082 dm_new_state->abm_level = val;
/*
 * drm_connector_funcs.atomic_get_property: read back the driver-private
 * connector properties stored in the DM connector state.
 */
6089 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6090 const struct drm_connector_state *state,
6091 struct drm_property *property,
6094 struct drm_device *dev = connector->dev;
6095 struct amdgpu_device *adev = drm_to_adev(dev);
6096 struct dm_connector_state *dm_state =
6097 to_dm_connector_state(state);
6100 if (property == dev->mode_config.scaling_mode_property) {
6101 switch (dm_state->scaling) {
6103 *val = DRM_MODE_SCALE_CENTER;
6106 *val = DRM_MODE_SCALE_ASPECT;
6109 *val = DRM_MODE_SCALE_FULLSCREEN;
6113 *val = DRM_MODE_SCALE_NONE;
6117 } else if (property == adev->mode_info.underscan_hborder_property) {
6118 *val = dm_state->underscan_hborder;
6120 } else if (property == adev->mode_info.underscan_vborder_property) {
6121 *val = dm_state->underscan_vborder;
6123 } else if (property == adev->mode_info.underscan_property) {
6124 *val = dm_state->underscan_enable;
6126 } else if (property == adev->mode_info.abm_level_property) {
6127 *val = dm_state->abm_level;
/* drm_connector_funcs.early_unregister: tear down the DP AUX channel. */
6134 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6136 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6138 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
/*
 * drm_connector_funcs.destroy: release everything the connector owns —
 * MST topology manager, backlight device (eDP/LVDS), sink references,
 * CEC/DP helpers, the i2c adapter and the AUX name string.
 */
6141 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6143 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6144 const struct dc_link *link = aconnector->dc_link;
6145 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6146 struct amdgpu_display_manager *dm = &adev->dm;
6149 * Call only if mst_mgr was initialized before since it's not done
6150 * for all connector types.
6152 if (aconnector->mst_mgr.dev)
6153 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6155 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6156 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6158 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6159 link->type != dc_connection_none &&
6160 dm->backlight_dev) {
6161 backlight_device_unregister(dm->backlight_dev);
6162 dm->backlight_dev = NULL;
/* Drop both the emulated and the real sink references, if present. */
6166 if (aconnector->dc_em_sink)
6167 dc_sink_release(aconnector->dc_em_sink);
6168 aconnector->dc_em_sink = NULL;
6169 if (aconnector->dc_sink)
6170 dc_sink_release(aconnector->dc_sink);
6171 aconnector->dc_sink = NULL;
6173 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6174 drm_connector_unregister(connector);
6175 drm_connector_cleanup(connector);
6176 if (aconnector->i2c) {
6177 i2c_del_adapter(&aconnector->i2c->base);
6178 kfree(aconnector->i2c);
6180 kfree(aconnector->dm_dp_aux.aux.name);
/*
 * drm_connector_funcs.reset: replace the connector state with a fresh
 * zeroed dm_connector_state carrying the driver defaults (RMX_OFF,
 * no underscan, 8 bpc max, ABM level from module parameter on eDP).
 */
6185 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6187 struct dm_connector_state *state =
6188 to_dm_connector_state(connector->state);
6190 if (connector->state)
6191 __drm_atomic_helper_connector_destroy_state(connector->state);
6195 state = kzalloc(sizeof(*state), GFP_KERNEL);
6198 state->scaling = RMX_OFF;
6199 state->underscan_enable = false;
6200 state->underscan_hborder = 0;
6201 state->underscan_vborder = 0;
6202 state->base.max_requested_bpc = 8;
6203 state->vcpi_slots = 0;
6205 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6206 state->abm_level = amdgpu_dm_abm_level;
6208 __drm_atomic_helper_connector_reset(connector, &state->base);
/*
 * drm_connector_funcs.atomic_duplicate_state: kmemdup the current DM
 * connector state, re-run the base duplication helper, then explicitly
 * copy the DM-private fields.
 */
6212 struct drm_connector_state *
6213 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6215 struct dm_connector_state *state =
6216 to_dm_connector_state(connector->state);
6218 struct dm_connector_state *new_state =
6219 kmemdup(state, sizeof(*state), GFP_KERNEL);
6224 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6226 new_state->freesync_capable = state->freesync_capable;
6227 new_state->abm_level = state->abm_level;
6228 new_state->scaling = state->scaling;
6229 new_state->underscan_enable = state->underscan_enable;
6230 new_state->underscan_hborder = state->underscan_hborder;
6231 new_state->underscan_vborder = state->underscan_vborder;
6232 new_state->vcpi_slots = state->vcpi_slots;
6233 new_state->pbn = state->pbn;
6234 return &new_state->base;
/*
 * drm_connector_funcs.late_register: register the DP AUX device for
 * DP/eDP connectors and create the connector's debugfs entries.
 */
6238 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6240 struct amdgpu_dm_connector *amdgpu_dm_connector =
6241 to_amdgpu_dm_connector(connector);
6244 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6245 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6246 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6247 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6252 #if defined(CONFIG_DEBUG_FS)
6253 connector_debugfs_init(amdgpu_dm_connector);
/* Connector function table wiring the DM callbacks into DRM. */
6259 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6260 .reset = amdgpu_dm_connector_funcs_reset,
6261 .detect = amdgpu_dm_connector_detect,
6262 .fill_modes = drm_helper_probe_single_connector_modes,
6263 .destroy = amdgpu_dm_connector_destroy,
6264 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6265 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6266 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6267 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6268 .late_register = amdgpu_dm_connector_late_register,
6269 .early_unregister = amdgpu_dm_connector_unregister
/* Thin wrapper used as drm_connector_helper_funcs.get_modes. */
6272 static int get_modes(struct drm_connector *connector)
6274 return amdgpu_dm_connector_get_modes(connector);
/*
 * Create an emulated (remote) sink from the connector's EDID blob, used
 * when the connector is forced on. Without an EDID blob the connector is
 * forced OFF instead.
 */
6277 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6279 struct dc_sink_init_data init_params = {
6280 .link = aconnector->dc_link,
6281 .sink_signal = SIGNAL_TYPE_VIRTUAL
6285 if (!aconnector->base.edid_blob_ptr) {
6286 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6287 aconnector->base.name);
6289 aconnector->base.force = DRM_FORCE_OFF;
6290 aconnector->base.override_edid = false;
6294 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6296 aconnector->edid = edid;
/* EDID length counts the base block plus all extension blocks. */
6298 aconnector->dc_em_sink = dc_link_add_remote_sink(
6299 aconnector->dc_link,
6301 (edid->extensions + 1) * EDID_LENGTH,
6304 if (aconnector->base.force == DRM_FORCE_ON) {
6305 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6306 aconnector->dc_link->local_sink :
6307 aconnector->dc_em_sink;
6308 dc_sink_retain(aconnector->dc_sink);
/*
 * Prepare a forced connector for modesetting: seed non-zero DP link caps
 * for headless boot, then install an emulated EDID sink.
 */
6312 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6314 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6317 * In case of headless boot with force on for DP managed connector
6318 * Those settings have to be != 0 to get initial modeset
6320 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6321 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6322 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6326 aconnector->base.override_edid = true;
6327 create_eml_sink(aconnector);
/*
 * Create a stream for the sink and validate it against DC, retrying with
 * progressively lower bpc (down to 6) on validation failure; on an encoder
 * validation failure, retry once more forcing YCbCr 4:2:0 output.
 * Returns the validated stream, or the last (failed) attempt's result.
 */
6330 static struct dc_stream_state *
6331 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6332 const struct drm_display_mode *drm_mode,
6333 const struct dm_connector_state *dm_state,
6334 const struct dc_stream_state *old_stream)
6336 struct drm_connector *connector = &aconnector->base;
6337 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6338 struct dc_stream_state *stream;
6339 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6340 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6341 enum dc_status dc_result = DC_OK;
6344 stream = create_stream_for_sink(aconnector, drm_mode,
6345 dm_state, old_stream,
6347 if (stream == NULL) {
6348 DRM_ERROR("Failed to create stream for sink!\n");
6352 dc_result = dc_validate_stream(adev->dm.dc, stream);
6354 if (dc_result != DC_OK) {
6355 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6360 dc_status_to_str(dc_result));
6362 dc_stream_release(stream);
6364 requested_bpc -= 2; /* lower bpc to retry validation */
6367 } while (stream == NULL && requested_bpc >= 6);
6369 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6370 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
/* Recursive retry with YUV420 forced; flag restored afterwards. */
6372 aconnector->force_yuv420_output = true;
6373 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6374 dm_state, old_stream);
6375 aconnector->force_yuv420_output = false;
/*
 * drm_connector_helper_funcs.mode_valid: reject interlaced/doublescan
 * modes, then accept a mode only if a validated stream can be created
 * for it.
 */
6381 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6382 struct drm_display_mode *mode)
6384 int result = MODE_ERROR;
6385 struct dc_sink *dc_sink;
6386 /* TODO: Unhardcode stream count */
6387 struct dc_stream_state *stream;
6388 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6390 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6391 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6395 * Only run this the first time mode_valid is called to initialize
6398 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6399 !aconnector->dc_em_sink)
6400 handle_edid_mgmt(aconnector);
6402 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6404 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6405 aconnector->base.force != DRM_FORCE_ON) {
6406 DRM_ERROR("dc_sink is NULL!\n");
6410 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6412 dc_stream_release(stream);
6417 /* TODO: error handling*/
/*
 * Pack the connector state's HDR static metadata into a DC info packet.
 * Builds an HDMI DRM (Dynamic Range & Mastering) infoframe, then wraps it
 * in the HDMI infoframe header or the DP SDP header depending on the
 * connector type. Returns 0 on success or a negative errno.
 */
6421 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6422 struct dc_info_packet *out)
6424 struct hdmi_drm_infoframe frame;
6425 unsigned char buf[30]; /* 26 + 4 */
6429 memset(out, 0, sizeof(*out));
6431 if (!state->hdr_output_metadata)
6434 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6438 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6442 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6446 /* Prepare the infopacket for DC. */
6447 switch (state->connector->connector_type) {
6448 case DRM_MODE_CONNECTOR_HDMIA:
6449 out->hb0 = 0x87; /* type */
6450 out->hb1 = 0x01; /* version */
6451 out->hb2 = 0x1A; /* length */
6452 out->sb[0] = buf[3]; /* checksum */
6456 case DRM_MODE_CONNECTOR_DisplayPort:
6457 case DRM_MODE_CONNECTOR_eDP:
6458 out->hb0 = 0x00; /* sdp id, zero */
6459 out->hb1 = 0x87; /* type */
6460 out->hb2 = 0x1D; /* payload len - 1 */
6461 out->hb3 = (0x13 << 2); /* sdp version */
6462 out->sb[0] = 0x01; /* version */
6463 out->sb[1] = 0x1A; /* length */
/* Copy the 26-byte metadata payload past the 4-byte header. */
6471 memcpy(&out->sb[i], &buf[4], 26);
6474 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6475 sizeof(out->sb), false);
/*
 * drm_connector_helper_funcs.atomic_check: when the HDR static metadata
 * changed, validate the new metadata packs into an info packet and force
 * a modeset on the CRTC if we are entering or exiting HDR.
 */
6481 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6482 struct drm_atomic_state *state)
6484 struct drm_connector_state *new_con_state =
6485 drm_atomic_get_new_connector_state(state, conn);
6486 struct drm_connector_state *old_con_state =
6487 drm_atomic_get_old_connector_state(state, conn);
6488 struct drm_crtc *crtc = new_con_state->crtc;
6489 struct drm_crtc_state *new_crtc_state;
6492 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6497 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6498 struct dc_info_packet hdr_infopacket;
/* Validate the metadata before committing anything. */
6500 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6504 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6505 if (IS_ERR(new_crtc_state))
6506 return PTR_ERR(new_crtc_state);
6509 * DC considers the stream backends changed if the
6510 * static metadata changes. Forcing the modeset also
6511 * gives a simple way for userspace to switch from
6512 * 8bpc to 10bpc when setting the metadata to enter
6515 * Changing the static metadata after it's been
6516 * set is permissible, however. So only force a
6517 * modeset if we're entering or exiting HDR.
6519 new_crtc_state->mode_changed =
6520 !old_con_state->hdr_output_metadata ||
6521 !new_con_state->hdr_output_metadata;
/* Connector helper table: mode probing, validation and atomic check. */
6527 static const struct drm_connector_helper_funcs
6528 amdgpu_dm_connector_helper_funcs = {
6530 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6531 * modes will be filtered by drm_mode_validate_size(), and those modes
6532 * are missing after user start lightdm. So we need to renew modes list.
6533 * in get_modes call back, not just return the modes count
6535 .get_modes = get_modes,
6536 .mode_valid = amdgpu_dm_connector_mode_valid,
6537 .atomic_check = amdgpu_dm_connector_atomic_check,
/* Intentionally empty: CRTC disable is handled elsewhere in the DM. */
6540 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
/*
 * Count the non-cursor planes on this CRTC that will be active (have a
 * framebuffer) after the atomic commit described by @new_crtc_state.
 */
6544 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6546 struct drm_atomic_state *state = new_crtc_state->state;
6547 struct drm_plane *plane;
6550 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6551 struct drm_plane_state *new_plane_state;
6553 /* Cursor planes are "fake". */
6554 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6557 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6559 if (!new_plane_state) {
6561 * The plane is enabled on the CRTC and hasn't changed
6562 * state. This means that it previously passed
6563 * validation and is therefore enabled.
6569 /* We need a framebuffer to be considered enabled. */
6570 num_active += (new_plane_state->fb != NULL);
/*
 * Cache the active (non-cursor) plane count on the DM CRTC state; zero
 * when the CRTC has no stream.
 */
6576 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6577 struct drm_crtc_state *new_crtc_state)
6579 struct dm_crtc_state *dm_new_crtc_state =
6580 to_dm_crtc_state(new_crtc_state);
6582 dm_new_crtc_state->active_planes = 0;
6584 if (!dm_new_crtc_state->stream)
6587 dm_new_crtc_state->active_planes =
6588 count_crtc_active_planes(new_crtc_state);
/*
 * drm_crtc_helper_funcs.atomic_check: refresh the active-plane count,
 * require the primary plane whenever the CRTC is enabled, and validate
 * the attached dc_stream against DC.
 */
6591 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6592 struct drm_atomic_state *state)
6594 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6596 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6597 struct dc *dc = adev->dm.dc;
6598 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6601 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6603 dm_update_crtc_active_planes(crtc, crtc_state);
6605 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6606 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6611 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6612 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6613 * planes are disabled, which is not supported by the hardware. And there is legacy
6614 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6616 if (crtc_state->enable &&
6617 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6618 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6622 /* In some use cases, like reset, no stream is attached */
6623 if (!dm_crtc_state->stream)
6626 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6629 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
/* No driver-specific mode fixup is needed; accept the mode as-is. */
6633 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6634 const struct drm_display_mode *mode,
6635 struct drm_display_mode *adjusted_mode)
/* CRTC helper table wiring the DM atomic-check and scanout helpers. */
6640 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6641 .disable = dm_crtc_helper_disable,
6642 .atomic_check = dm_crtc_helper_atomic_check,
6643 .mode_fixup = dm_crtc_helper_mode_fixup,
6644 .get_scanout_position = amdgpu_crtc_get_scanout_position,
/* Intentionally empty: encoder disable is handled by the DM/DC core. */
6647 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
/* Map a DC color-depth enum to bits per color component. */
6652 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6654 switch (display_color_depth) {
6655 case COLOR_DEPTH_666:
6657 case COLOR_DEPTH_888:
6659 case COLOR_DEPTH_101010:
6661 case COLOR_DEPTH_121212:
6663 case COLOR_DEPTH_141414:
6665 case COLOR_DEPTH_161616:
/*
 * drm_encoder_helper_funcs.atomic_check for MST connectors: compute the
 * stream's PBN from clock and bpp and atomically reserve VCPI slots in
 * the MST topology. No-op for non-MST connectors (no port) or when
 * neither connectors nor mode changed.
 */
6673 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6674 struct drm_crtc_state *crtc_state,
6675 struct drm_connector_state *conn_state)
6677 struct drm_atomic_state *state = crtc_state->state;
6678 struct drm_connector *connector = conn_state->connector;
6679 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6680 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6681 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6682 struct drm_dp_mst_topology_mgr *mst_mgr;
6683 struct drm_dp_mst_port *mst_port;
6684 enum dc_color_depth color_depth;
6686 bool is_y420 = false;
6688 if (!aconnector->port || !aconnector->dc_sink)
6691 mst_port = aconnector->port;
6692 mst_mgr = &aconnector->mst_port->mst_mgr;
6694 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6697 if (!state->duplicated) {
6698 int max_bpc = conn_state->max_requested_bpc;
6699 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6700 aconnector->force_yuv420_output;
6701 color_depth = convert_color_depth_from_display_info(connector,
/* bpp = bits per component * 3 (RGB/YCbCr components). */
6704 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6705 clock = adjusted_mode->clock;
6706 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6708 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6711 dm_new_connector_state->pbn,
6712 dm_mst_get_pbn_divider(aconnector->dc_link));
6713 if (dm_new_connector_state->vcpi_slots < 0) {
6714 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6715 return dm_new_connector_state->vcpi_slots;
/* Encoder helper table: MST VCPI bookkeeping happens in atomic_check. */
6720 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6721 .disable = dm_encoder_helper_disable,
6722 .atomic_check = dm_encoder_helper_atomic_check
6725 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For each MST connector in the atomic state, find its dc_stream and
 * enable/disable DSC on the MST port accordingly, recomputing PBN and
 * VCPI slot allocation from the DSC-compressed bpp when DSC is on.
 */
6726 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6727 struct dc_state *dc_state)
6729 struct dc_stream_state *stream = NULL;
6730 struct drm_connector *connector;
6731 struct drm_connector_state *new_con_state;
6732 struct amdgpu_dm_connector *aconnector;
6733 struct dm_connector_state *dm_conn_state;
6734 int i, j, clock, bpp;
6735 int vcpi, pbn_div, pbn = 0;
6737 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6739 aconnector = to_amdgpu_dm_connector(connector);
/* Skip non-MST connectors. */
6741 if (!aconnector->port)
6744 if (!new_con_state || !new_con_state->crtc)
6747 dm_conn_state = to_dm_connector_state(new_con_state);
/* Locate the dc_stream belonging to this connector. */
6749 for (j = 0; j < dc_state->stream_count; j++) {
6750 stream = dc_state->streams[j];
6754 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6763 if (stream->timing.flags.DSC != 1) {
6764 drm_dp_mst_atomic_enable_dsc(state,
/* DSC enabled: PBN is derived from the compressed bpp. */
6772 pbn_div = dm_mst_get_pbn_divider(stream->link);
6773 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6774 clock = stream->timing.pix_clk_100hz / 10;
6775 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6776 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6783 dm_conn_state->pbn = pbn;
6784 dm_conn_state->vcpi_slots = vcpi;
/*
 * drm_plane_funcs.reset: destroy any existing plane state and install a
 * fresh zeroed dm_plane_state.
 */
6790 static void dm_drm_plane_reset(struct drm_plane *plane)
6792 struct dm_plane_state *amdgpu_state = NULL;
6795 plane->funcs->atomic_destroy_state(plane, plane->state);
6797 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6798 WARN_ON(amdgpu_state == NULL);
6801 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
/*
 * drm_plane_funcs.atomic_duplicate_state: copy the base plane state and
 * take an extra reference on the shared dc_plane_state, if any.
 */
6804 static struct drm_plane_state *
6805 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6807 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6809 old_dm_plane_state = to_dm_plane_state(plane->state);
6810 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6811 if (!dm_plane_state)
6814 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6816 if (old_dm_plane_state->dc_state) {
6817 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6818 dc_plane_state_retain(dm_plane_state->dc_state);
6821 return &dm_plane_state->base;
/*
 * drm_plane_funcs.atomic_destroy_state: drop the dc_plane_state reference
 * held by this state, then free the base state.
 */
6824 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6825 struct drm_plane_state *state)
6827 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6829 if (dm_plane_state->dc_state)
6830 dc_plane_state_release(dm_plane_state->dc_state);
6832 drm_atomic_helper_plane_destroy_state(plane, state);
/* Plane function table: atomic helpers plus DM state management. */
6835 static const struct drm_plane_funcs dm_plane_funcs = {
6836 .update_plane = drm_atomic_helper_update_plane,
6837 .disable_plane = drm_atomic_helper_disable_plane,
6838 .destroy = drm_primary_helper_destroy,
6839 .reset = dm_drm_plane_reset,
6840 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6841 .atomic_destroy_state = dm_drm_plane_destroy_state,
6842 .format_mod_supported = dm_plane_format_mod_supported,
/*
 * drm_plane_helper_funcs.prepare_fb: pin the framebuffer BO into a
 * scanout-capable domain (VRAM for cursors), map it into GART, record the
 * GPU address on the amdgpu_framebuffer, and fill plane buffer attributes
 * for newly created dc_plane_states.
 */
6845 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6846 struct drm_plane_state *new_state)
6848 struct amdgpu_framebuffer *afb;
6849 struct drm_gem_object *obj;
6850 struct amdgpu_device *adev;
6851 struct amdgpu_bo *rbo;
6852 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6853 struct list_head list;
6854 struct ttm_validate_buffer tv;
6855 struct ww_acquire_ctx ticket;
6859 if (!new_state->fb) {
6860 DRM_DEBUG_KMS("No FB bound\n");
6864 afb = to_amdgpu_framebuffer(new_state->fb);
6865 obj = new_state->fb->obj[0];
6866 rbo = gem_to_amdgpu_bo(obj);
6867 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6868 INIT_LIST_HEAD(&list);
6872 list_add(&tv.head, &list);
/* Reserve the BO before pinning. */
6874 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6876 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
/* Cursor BOs must live in VRAM; others use any supported domain. */
6880 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6881 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6883 domain = AMDGPU_GEM_DOMAIN_VRAM;
6885 r = amdgpu_bo_pin(rbo, domain);
6886 if (unlikely(r != 0)) {
6887 if (r != -ERESTARTSYS)
6888 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6889 ttm_eu_backoff_reservation(&ticket, &list);
6893 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6894 if (unlikely(r != 0)) {
6895 amdgpu_bo_unpin(rbo);
6896 ttm_eu_backoff_reservation(&ticket, &list);
6897 DRM_ERROR("%p bind failed\n", rbo);
6901 ttm_eu_backoff_reservation(&ticket, &list);
6903 afb->address = amdgpu_bo_gpu_offset(rbo);
6908 * We don't do surface updates on planes that have been newly created,
6909 * but we also don't have the afb->address during atomic check.
6911 * Fill in buffer attributes depending on the address here, but only on
6912 * newly created planes since they're not being used by DC yet and this
6913 * won't modify global state.
6915 dm_plane_state_old = to_dm_plane_state(plane->state);
6916 dm_plane_state_new = to_dm_plane_state(new_state);
6918 if (dm_plane_state_new->dc_state &&
6919 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6920 struct dc_plane_state *plane_state =
6921 dm_plane_state_new->dc_state;
6922 bool force_disable_dcc = !plane_state->dcc.enable;
6924 fill_plane_buffer_attributes(
6925 adev, afb, plane_state->format, plane_state->rotation,
6927 &plane_state->tiling_info, &plane_state->plane_size,
6928 &plane_state->dcc, &plane_state->address,
6929 afb->tmz_surface, force_disable_dcc);
/*
 * drm_plane_helper_funcs.cleanup_fb: unpin and drop the reference on the
 * framebuffer BO pinned by prepare_fb.
 */
6935 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6936 struct drm_plane_state *old_state)
6938 struct amdgpu_bo *rbo;
6944 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6945 r = amdgpu_bo_reserve(rbo, false);
6947 DRM_ERROR("failed to reserve rbo before unpin\n");
6951 amdgpu_bo_unpin(rbo);
6952 amdgpu_bo_unreserve(rbo);
6953 amdgpu_bo_unref(&rbo);
/*
 * dm_plane_helper_check_state - validate a plane state's viewport and
 * scaling against DC plane capabilities, then defer to the generic DRM
 * atomic plane-state check with the derived min/max scale factors.
 */
6956 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6957 struct drm_crtc_state *new_crtc_state)
6959 struct drm_framebuffer *fb = state->fb;
6960 int min_downscale, max_upscale;
/* Defaults mean "unconstrained" when the plane is disabled (no fb/crtc). */
6962 int max_scale = INT_MAX;
6964 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6965 if (fb && state->crtc) {
6966 /* Validate viewport to cover the case when only the position changes */
6967 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6968 int viewport_width = state->crtc_w;
6969 int viewport_height = state->crtc_h;
/* Clip the viewport to the CRTC's active display area on each edge. */
6971 if (state->crtc_x < 0)
6972 viewport_width += state->crtc_x;
6973 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6974 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6976 if (state->crtc_y < 0)
6977 viewport_height += state->crtc_y;
6978 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6979 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6981 if (viewport_width < 0 || viewport_height < 0) {
6982 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n")
6984 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6985 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6987 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6988 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6994 /* Get min/max allowed scaling factors from plane caps. */
6995 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6996 &min_downscale, &max_upscale);
6998 * Convert to drm convention: 16.16 fixed point, instead of dc's
6999 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7000 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7002 min_scale = (1000 << 16) / max_upscale;
7003 max_scale = (1000 << 16) / min_downscale;
/* can_position=true, can_update_disabled=true for DM planes. */
7006 return drm_atomic_helper_check_plane_state(
7007 state, new_crtc_state, min_scale, max_scale, true, true);
/*
 * dm_plane_atomic_check - atomic_check hook: validate the new plane state
 * via the DRM helper check, compute scaling info, and ask DC whether the
 * associated dc_plane_state is acceptable.
 */
7010 static int dm_plane_atomic_check(struct drm_plane *plane,
7011 struct drm_atomic_state *state)
7013 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7015 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7016 struct dc *dc = adev->dm.dc;
7017 struct dm_plane_state *dm_plane_state;
7018 struct dc_scaling_info scaling_info;
7019 struct drm_crtc_state *new_crtc_state;
7022 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7024 dm_plane_state = to_dm_plane_state(new_plane_state);
/* Nothing to validate if DC has no plane state bound yet. */
7026 if (!dm_plane_state->dc_state)
7030 drm_atomic_get_new_crtc_state(state,
7031 new_plane_state->crtc);
7032 if (!new_crtc_state)
7035 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7039 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
/* Final say belongs to DC's own plane validation. */
7043 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
/*
 * dm_plane_atomic_async_check - allow async (fast-path) updates only for
 * cursor planes; all other plane types must go through a full commit.
 */
7049 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7050 struct drm_atomic_state *state)
7052 /* Only support async updates on cursor planes. */
7053 if (plane->type != DRM_PLANE_TYPE_CURSOR)
/*
 * dm_plane_atomic_async_update - fast-path cursor update: copy the new
 * position/size into the committed plane state, swap in the new fb, and
 * program the cursor in hardware without a full atomic commit.
 */
7059 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7060 struct drm_atomic_state *state)
7062 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7064 struct drm_plane_state *old_state =
7065 drm_atomic_get_old_plane_state(state, plane);
7067 trace_amdgpu_dm_atomic_update_cursor(new_state);
/* Swap fbs so refcounting stays balanced when new_state is destroyed. */
7069 swap(plane->state->fb, new_state->fb);
7071 plane->state->src_x = new_state->src_x;
7072 plane->state->src_y = new_state->src_y;
7073 plane->state->src_w = new_state->src_w;
7074 plane->state->src_h = new_state->src_h;
7075 plane->state->crtc_x = new_state->crtc_x;
7076 plane->state->crtc_y = new_state->crtc_y;
7077 plane->state->crtc_w = new_state->crtc_w;
7078 plane->state->crtc_h = new_state->crtc_h;
/* Push the new cursor attributes/position to DC. */
7080 handle_cursor_update(plane, old_state);
/* Plane helper vtable wiring the DM implementations into DRM's atomic core. */
7083 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7084 .prepare_fb = dm_plane_helper_prepare_fb,
7085 .cleanup_fb = dm_plane_helper_cleanup_fb,
7086 .atomic_check = dm_plane_atomic_check,
7087 .atomic_async_check = dm_plane_atomic_async_check,
7088 .atomic_async_update = dm_plane_atomic_async_update
/* Pixel formats advertised for primary planes (RGB only for now). */
7092 * TODO: these are currently initialized to rgb formats only.
7093 * For future use cases we should either initialize them dynamically based on
7094 * plane capabilities, or initialize this array to all formats, so internal drm
7095 * check will succeed, and let DC implement proper check
7097 static const uint32_t rgb_formats[] = {
7098 DRM_FORMAT_XRGB8888,
7099 DRM_FORMAT_ARGB8888,
7100 DRM_FORMAT_RGBA8888,
7101 DRM_FORMAT_XRGB2101010,
7102 DRM_FORMAT_XBGR2101010,
7103 DRM_FORMAT_ARGB2101010,
7104 DRM_FORMAT_ABGR2101010,
7105 DRM_FORMAT_XRGB16161616,
7106 DRM_FORMAT_XBGR16161616,
7107 DRM_FORMAT_ARGB16161616,
7108 DRM_FORMAT_ABGR16161616,
7109 DRM_FORMAT_XBGR8888,
7110 DRM_FORMAT_ABGR8888,
/* Pixel formats advertised for overlay planes (8-bit RGB variants only). */
7114 static const uint32_t overlay_formats[] = {
7115 DRM_FORMAT_XRGB8888,
7116 DRM_FORMAT_ARGB8888,
7117 DRM_FORMAT_RGBA8888,
7118 DRM_FORMAT_XBGR8888,
7119 DRM_FORMAT_ABGR8888,
/* Pixel formats advertised for the cursor plane (entries elided in excerpt). */
7123 static const u32 cursor_formats[] = {
/*
 * get_plane_formats - fill @formats with the pixel formats supported by
 * @plane, bounded by @max_formats; returns the number written.
 * Primary planes start from the static RGB table and append YUV/FP16
 * formats when the DC plane caps report support for them.
 */
7127 static int get_plane_formats(const struct drm_plane *plane,
7128 const struct dc_plane_cap *plane_cap,
7129 uint32_t *formats, int max_formats)
7131 int i, num_formats = 0;
7134 * TODO: Query support for each group of formats directly from
7135 * DC plane caps. This will require adding more formats to the
7139 switch (plane->type) {
7140 case DRM_PLANE_TYPE_PRIMARY:
7141 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7142 if (num_formats >= max_formats)
7145 formats[num_formats++] = rgb_formats[i];
/* Cap-gated extras: NV12/P010 for video, FP16 for HDR scanout. */
7148 if (plane_cap && plane_cap->pixel_format_support.nv12)
7149 formats[num_formats++] = DRM_FORMAT_NV12;
7150 if (plane_cap && plane_cap->pixel_format_support.p010)
7151 formats[num_formats++] = DRM_FORMAT_P010;
7152 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7153 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7154 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7155 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7156 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7160 case DRM_PLANE_TYPE_OVERLAY:
7161 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7162 if (num_formats >= max_formats)
7165 formats[num_formats++] = overlay_formats[i];
7169 case DRM_PLANE_TYPE_CURSOR:
7170 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7171 if (num_formats >= max_formats)
7174 formats[num_formats++] = cursor_formats[i];
/*
 * amdgpu_dm_plane_init - register @plane with DRM: collect supported
 * formats and modifiers, do the universal plane init, then attach
 * alpha/blend, YUV color-encoding and rotation properties as the plane
 * type and DC caps allow.
 */
7182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7183 struct drm_plane *plane,
7184 unsigned long possible_crtcs,
7185 const struct dc_plane_cap *plane_cap)
7187 uint32_t formats[32];
7190 unsigned int supported_rotations;
7191 uint64_t *modifiers = NULL;
7193 num_formats = get_plane_formats(plane, plane_cap, formats,
7194 ARRAY_SIZE(formats));
/* Builds the per-asic modifier list; presumably kfree'd after init — confirm. */
7196 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7200 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7201 &dm_plane_funcs, formats, num_formats,
7202 modifiers, plane->type, NULL);
/* Per-pixel alpha blending is only exposed on capable overlay planes. */
7207 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7208 plane_cap && plane_cap->per_pixel_alpha) {
7209 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7210 BIT(DRM_MODE_BLEND_PREMULTI);
7212 drm_plane_create_alpha_property(plane);
7213 drm_plane_create_blend_mode_property(plane, blend_caps);
7216 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7218 (plane_cap->pixel_format_support.nv12 ||
7219 plane_cap->pixel_format_support.p010)) {
7220 /* This only affects YUV formats. */
7221 drm_plane_create_color_properties(
7223 BIT(DRM_COLOR_YCBCR_BT601) |
7224 BIT(DRM_COLOR_YCBCR_BT709) |
7225 BIT(DRM_COLOR_YCBCR_BT2020),
7226 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7227 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7228 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7231 supported_rotations =
7232 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7233 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
/* Rotation is exposed on CIK+ asics, but never on the cursor plane. */
7235 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7236 plane->type != DRM_PLANE_TYPE_CURSOR)
7237 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7238 supported_rotations);
7240 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7242 /* Create (reset) the plane state */
7243 if (plane->funcs->reset)
7244 plane->funcs->reset(plane);
/*
 * amdgpu_dm_crtc_init - allocate and register one CRTC with its dedicated
 * cursor plane, wire in the DM CRTC funcs/helpers, and enable DRM color
 * management sized to the DC LUT capabilities.
 * NOTE(review): error-path labels are elided in this excerpt.
 */
7249 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7250 struct drm_plane *plane,
7251 uint32_t crtc_index)
7253 struct amdgpu_crtc *acrtc = NULL;
7254 struct drm_plane *cursor_plane;
7258 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7262 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
/* possible_crtcs=0: DRM fills it in for the CRTC's own cursor plane. */
7263 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7265 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7269 res = drm_crtc_init_with_planes(
7274 &amdgpu_dm_crtc_funcs, NULL);
7279 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7281 /* Create (reset) the plane state */
7282 if (acrtc->base.funcs->reset)
7283 acrtc->base.funcs->reset(&acrtc->base);
/* DC reports one square max cursor dimension; use it for both axes. */
7285 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7286 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7288 acrtc->crtc_id = crtc_index;
7289 acrtc->base.enabled = false;
/* -1 = no OTG assigned until a stream is committed. */
7290 acrtc->otg_inst = -1;
7292 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7293 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7294 true, MAX_COLOR_LUT_ENTRIES);
7295 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7301 kfree(cursor_plane);
/*
 * to_drm_connector_type - map a DC signal_type to the corresponding DRM
 * connector type; unknown signals fall through to Unknown.
 */
7306 static int to_drm_connector_type(enum signal_type st)
7309 case SIGNAL_TYPE_HDMI_TYPE_A:
7310 return DRM_MODE_CONNECTOR_HDMIA;
7311 case SIGNAL_TYPE_EDP:
7312 return DRM_MODE_CONNECTOR_eDP;
7313 case SIGNAL_TYPE_LVDS:
7314 return DRM_MODE_CONNECTOR_LVDS;
7315 case SIGNAL_TYPE_RGB:
7316 return DRM_MODE_CONNECTOR_VGA;
7317 case SIGNAL_TYPE_DISPLAY_PORT:
7318 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7319 return DRM_MODE_CONNECTOR_DisplayPort;
7320 case SIGNAL_TYPE_DVI_DUAL_LINK:
7321 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7322 return DRM_MODE_CONNECTOR_DVID;
7323 case SIGNAL_TYPE_VIRTUAL:
7324 return DRM_MODE_CONNECTOR_VIRTUAL;
7327 return DRM_MODE_CONNECTOR_Unknown;
/*
 * amdgpu_dm_connector_to_encoder - return the single encoder attached to
 * @connector (first iteration hit; DM attaches exactly one per connector).
 */
7331 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7333 struct drm_encoder *encoder;
7335 /* There is only one encoder per connector */
7336 drm_connector_for_each_possible_encoder(connector, encoder)
/*
 * amdgpu_dm_get_native_mode - cache the connector's preferred probed mode
 * as the encoder's native mode (clock reset to 0 when none is found).
 */
7342 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7344 struct drm_encoder *encoder;
7345 struct amdgpu_encoder *amdgpu_encoder;
7347 encoder = amdgpu_dm_connector_to_encoder(connector);
7349 if (encoder == NULL)
7352 amdgpu_encoder = to_amdgpu_encoder(encoder);
/* clock == 0 marks "no native mode known" until a preferred mode is found. */
7354 amdgpu_encoder->native_mode.clock = 0;
7356 if (!list_empty(&connector->probed_modes)) {
7357 struct drm_display_mode *preferred_mode = NULL;
7359 list_for_each_entry(preferred_mode,
7360 &connector->probed_modes,
7362 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7363 amdgpu_encoder->native_mode = *preferred_mode;
/*
 * amdgpu_dm_create_common_mode - duplicate the encoder's native mode and
 * relabel it with the given name and hdisplay/vdisplay, clearing the
 * PREFERRED flag. Returns the new mode (caller adds it to probed_modes).
 */
7371 static struct drm_display_mode *
7372 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7374 int hdisplay, int vdisplay)
7376 struct drm_device *dev = encoder->dev;
7377 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7378 struct drm_display_mode *mode = NULL;
7379 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
/* Base the common mode on the panel's native timing, only resizing it. */
7381 mode = drm_mode_duplicate(dev, native_mode);
7386 mode->hdisplay = hdisplay;
7387 mode->vdisplay = vdisplay;
7388 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7389 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
/*
 * amdgpu_dm_connector_add_common_modes - add a fixed table of common
 * resolutions (scaled variants of the native mode) to the connector's
 * probed modes, skipping sizes larger than the native mode, the native
 * size itself, and any resolution already probed.
 */
7395 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7396 struct drm_connector *connector)
7398 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7399 struct drm_display_mode *mode = NULL;
7400 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7401 struct amdgpu_dm_connector *amdgpu_dm_connector =
7402 to_amdgpu_dm_connector(connector);
7406 char name[DRM_DISPLAY_MODE_LEN];
7409 } common_modes[] = {
7410 { "640x480", 640, 480},
7411 { "800x600", 800, 600},
7412 { "1024x768", 1024, 768},
7413 { "1280x720", 1280, 720},
7414 { "1280x800", 1280, 800},
7415 {"1280x1024", 1280, 1024},
7416 { "1440x900", 1440, 900},
7417 {"1680x1050", 1680, 1050},
7418 {"1600x1200", 1600, 1200},
7419 {"1920x1080", 1920, 1080},
7420 {"1920x1200", 1920, 1200}
7423 n = ARRAY_SIZE(common_modes);
7425 for (i = 0; i < n; i++) {
7426 struct drm_display_mode *curmode = NULL;
7427 bool mode_existed = false;
/* Skip entries that exceed or exactly match the native resolution. */
7429 if (common_modes[i].w > native_mode->hdisplay ||
7430 common_modes[i].h > native_mode->vdisplay ||
7431 (common_modes[i].w == native_mode->hdisplay &&
7432 common_modes[i].h == native_mode->vdisplay))
/* Skip resolutions the EDID already provided. */
7435 list_for_each_entry(curmode, &connector->probed_modes, head) {
7436 if (common_modes[i].w == curmode->hdisplay &&
7437 common_modes[i].h == curmode->vdisplay) {
7438 mode_existed = true;
7446 mode = amdgpu_dm_create_common_mode(encoder,
7447 common_modes[i].name, common_modes[i].w,
7449 drm_mode_probed_add(connector, mode);
7450 amdgpu_dm_connector->num_modes++;
/*
 * amdgpu_dm_connector_ddc_get_modes - (re)build the probed mode list from
 * the EDID, pick the native mode from the sorted list, and refresh the
 * connector's freesync capabilities. With no EDID, num_modes is zeroed.
 */
7454 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7457 struct amdgpu_dm_connector *amdgpu_dm_connector =
7458 to_amdgpu_dm_connector(connector);
7461 /* empty probed_modes */
7462 INIT_LIST_HEAD(&connector->probed_modes);
7463 amdgpu_dm_connector->num_modes =
7464 drm_add_edid_modes(connector, edid);
7466 /* sorting the probed modes before calling function
7467 * amdgpu_dm_get_native_mode() since EDID can have
7468 * more than one preferred mode. The modes that are
7469 * later in the probed mode list could be of higher
7470 * and preferred resolution. For example, 3840x2160
7471 * resolution in base EDID preferred timing and 4096x2160
7472 * preferred resolution in DID extension block later.
7474 drm_mode_sort(&connector->probed_modes);
7475 amdgpu_dm_get_native_mode(connector);
7477 /* Freesync capabilities are reset by calling
7478 * drm_add_edid_modes() and need to be
7481 amdgpu_dm_update_freesync_caps(connector, edid);
7483 amdgpu_dm_connector->num_modes = 0;
/*
 * is_duplicate_mode - true if @mode is already present (drm_mode_equal)
 * in the connector's probed mode list.
 */
7487 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7488 struct drm_display_mode *mode)
7490 struct drm_display_mode *m;
7492 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7493 if (drm_mode_equal(m, mode))
/*
 * add_fs_modes - synthesize freesync video modes: for each common refresh
 * rate within the panel's VRR range, duplicate the highest-refresh mode at
 * the preferred resolution and stretch its vertical blanking to hit the
 * target rate. Returns the number of modes added.
 */
7500 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7502 const struct drm_display_mode *m;
7503 struct drm_display_mode *new_mode;
7505 uint32_t new_modes_count = 0;
7507 /* Standard FPS values
7516 * 60 - Commonly used
7517 * 48,72,96 - Multiples of 24
7519 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7520 48000, 50000, 60000, 72000, 96000 };
7523 * Find mode with highest refresh rate with the same resolution
7524 * as the preferred mode. Some monitors report a preferred mode
7525 * with lower resolution than the highest refresh rate supported.
7528 m = get_highest_refresh_rate_mode(aconnector, true);
7532 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7533 uint64_t target_vtotal, target_vtotal_diff;
/* Only derive rates at or below the base mode's refresh (rates in mHz). */
7536 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7539 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7540 common_rates[i] > aconnector->max_vfreq * 1000)
/* vtotal needed so pixel clock / (htotal * vtotal) == target rate. */
7543 num = (unsigned long long)m->clock * 1000 * 1000;
7544 den = common_rates[i] * (unsigned long long)m->htotal;
7545 target_vtotal = div_u64(num, den);
7546 target_vtotal_diff = target_vtotal - m->vtotal;
7548 /* Check for illegal modes */
7549 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7550 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7551 m->vtotal + target_vtotal_diff < m->vsync_end)
7554 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
/* Stretch the back porch; keep sync width/position relative offsets. */
7558 new_mode->vtotal += (u16)target_vtotal_diff;
7559 new_mode->vsync_start += (u16)target_vtotal_diff;
7560 new_mode->vsync_end += (u16)target_vtotal_diff;
7561 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7562 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7564 if (!is_duplicate_mode(aconnector, new_mode)) {
7565 drm_mode_probed_add(&aconnector->base, new_mode);
7566 new_modes_count += 1;
7568 drm_mode_destroy(aconnector->base.dev, new_mode);
7571 return new_modes_count;
/*
 * amdgpu_dm_connector_add_freesync_modes - append synthesized freesync
 * video modes when the module option is enabled, an EDID is present, and
 * the VRR range spans more than 10 Hz.
 */
7574 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7577 struct amdgpu_dm_connector *amdgpu_dm_connector =
7578 to_amdgpu_dm_connector(connector);
7580 if (!(amdgpu_freesync_vid_mode && edid))
7583 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7584 amdgpu_dm_connector->num_modes +=
7585 add_fs_modes(amdgpu_dm_connector);
/*
 * amdgpu_dm_connector_get_modes - get_modes hook: with a valid EDID,
 * populate EDID + common + freesync modes; otherwise fall back to the
 * no-EDID 640x480 set. Returns the total mode count.
 */
7588 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7590 struct amdgpu_dm_connector *amdgpu_dm_connector =
7591 to_amdgpu_dm_connector(connector);
7592 struct drm_encoder *encoder;
7593 struct edid *edid = amdgpu_dm_connector->edid;
7595 encoder = amdgpu_dm_connector_to_encoder(connector);
7597 if (!drm_edid_is_valid(edid)) {
7598 amdgpu_dm_connector->num_modes =
7599 drm_add_modes_noedid(connector, 640, 480);
7601 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7602 amdgpu_dm_connector_add_common_modes(encoder, connector);
7603 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7605 amdgpu_dm_fbc_init(connector);
7607 return amdgpu_dm_connector->num_modes;
/*
 * amdgpu_dm_connector_init_helper - initialize DM-specific connector
 * state: link binding, HPD polling, and the full set of connector
 * properties (scaling, underscan, max bpc, ABM, HDR metadata, VRR, HDCP).
 * Also used by the MST code path, hence the mst_port guards.
 */
7610 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7611 struct amdgpu_dm_connector *aconnector,
7613 struct dc_link *link,
7616 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7619 * Some of the properties below require access to state, like bpc.
7620 * Allocate some default initial connector state with our reset helper.
7622 if (aconnector->base.funcs->reset)
7623 aconnector->base.funcs->reset(&aconnector->base);
7625 aconnector->connector_id = link_index;
7626 aconnector->dc_link = link;
7627 aconnector->base.interlace_allowed = false;
7628 aconnector->base.doublescan_allowed = false;
7629 aconnector->base.stereo_allowed = false;
7630 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7631 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7632 aconnector->audio_inst = -1;
7633 mutex_init(&aconnector->hpd_lock);
7636 * configure support HPD hot plug connector_>polled default value is 0
7637 * which means HPD hot plug not supported
7639 switch (connector_type) {
7640 case DRM_MODE_CONNECTOR_HDMIA:
7641 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7642 aconnector->base.ycbcr_420_allowed =
7643 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7645 case DRM_MODE_CONNECTOR_DisplayPort:
7646 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7647 aconnector->base.ycbcr_420_allowed =
7648 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7650 case DRM_MODE_CONNECTOR_DVID:
7651 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7657 drm_object_attach_property(&aconnector->base.base,
7658 dm->ddev->mode_config.scaling_mode_property,
7659 DRM_MODE_SCALE_NONE);
7661 drm_object_attach_property(&aconnector->base.base,
7662 adev->mode_info.underscan_property,
7664 drm_object_attach_property(&aconnector->base.base,
7665 adev->mode_info.underscan_hborder_property,
7667 drm_object_attach_property(&aconnector->base.base,
7668 adev->mode_info.underscan_vborder_property,
/* MST ports inherit max_bpc from the root connector, so skip it here. */
7671 if (!aconnector->mst_port)
7672 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7674 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7675 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7676 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
/* ABM (adaptive backlight) only applies to eDP with DMCU/DMUB available. */
7678 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7679 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7680 drm_object_attach_property(&aconnector->base.base,
7681 adev->mode_info.abm_level_property, 0);
7684 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7685 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7686 connector_type == DRM_MODE_CONNECTOR_eDP) {
7687 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7689 if (!aconnector->mst_port)
7690 drm_connector_attach_vrr_capable_property(&aconnector->base);
7692 #ifdef CONFIG_DRM_AMD_DC_HDCP
7693 if (adev->dm.hdcp_workqueue)
7694 drm_connector_attach_content_protection_property(&aconnector->base, true);
/*
 * amdgpu_dm_i2c_xfer - i2c_algorithm master_xfer: translate i2c_msg[] into
 * a DC i2c_command and submit it over the link's DDC channel.
 * NOTE(review): the submit call and return-value handling are partially
 * elided in this excerpt.
 */
7699 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7700 struct i2c_msg *msgs, int num)
7702 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7703 struct ddc_service *ddc_service = i2c->ddc_service;
7704 struct i2c_command cmd;
7708 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7713 cmd.number_of_payloads = num;
7714 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
/* One payload per message; I2C_M_RD flag selects read vs write. */
7717 for (i = 0; i < num; i++) {
7718 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7719 cmd.payloads[i].address = msgs[i].addr;
7720 cmd.payloads[i].length = msgs[i].len;
7721 cmd.payloads[i].data = msgs[i].buf;
7725 ddc_service->ctx->dc,
7726 ddc_service->ddc_pin->hw_info.ddc_channel,
7730 kfree(cmd.payloads);
/* Advertise plain I2C plus emulated SMBus support for the DM i2c bus. */
7734 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7736 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
/* i2c_algorithm vtable for the DC-backed DDC adapters created below. */
7739 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7740 .master_xfer = amdgpu_dm_i2c_xfer,
7741 .functionality = amdgpu_dm_i2c_func,
/*
 * create_i2c - allocate and populate an amdgpu_i2c_adapter bound to the
 * link's DDC service; the caller registers it with i2c_add_adapter().
 * Returns NULL on allocation failure (check elided in this excerpt).
 */
7744 static struct amdgpu_i2c_adapter *
7745 create_i2c(struct ddc_service *ddc_service,
7749 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7750 struct amdgpu_i2c_adapter *i2c;
7752 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7755 i2c->base.owner = THIS_MODULE;
7756 i2c->base.class = I2C_CLASS_DDC;
7757 i2c->base.dev.parent = &adev->pdev->dev;
7758 i2c->base.algo = &amdgpu_dm_i2c_algo;
7759 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7760 i2c_set_adapdata(&i2c->base, i2c);
7761 i2c->ddc_service = ddc_service;
/* Record which hw DDC channel this adapter drives for later transfers. */
7762 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7769 * Note: this function assumes that dc_link_detect() was called for the
7770 * dc_link which will be represented by this aconnector.
/*
 * amdgpu_dm_connector_init - create the i2c/DDC adapter, register the DRM
 * connector for the given dc_link, attach it to @aencoder, and set up
 * DP/MST support for DisplayPort-family connectors.
 * NOTE(review): error-path labels are elided in this excerpt.
 */
7772 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7773 struct amdgpu_dm_connector *aconnector,
7774 uint32_t link_index,
7775 struct amdgpu_encoder *aencoder)
7779 struct dc *dc = dm->dc;
7780 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7781 struct amdgpu_i2c_adapter *i2c;
/* Back-pointer so DC link events can reach the DRM connector. */
7783 link->priv = aconnector;
7785 DRM_DEBUG_DRIVER("%s()\n", __func__);
7787 i2c = create_i2c(link->ddc, link->link_index, &res);
7789 DRM_ERROR("Failed to create i2c adapter data\n");
7793 aconnector->i2c = i2c;
7794 res = i2c_add_adapter(&i2c->base);
7797 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7801 connector_type = to_drm_connector_type(link->connector_signal);
7803 res = drm_connector_init_with_ddc(
7806 &amdgpu_dm_connector_funcs,
7811 DRM_ERROR("connector_init failed\n");
7812 aconnector->connector_id = -1;
7816 drm_connector_helper_add(
7818 &amdgpu_dm_connector_helper_funcs);
7820 amdgpu_dm_connector_init_helper(
7827 drm_connector_attach_encoder(
7828 &aconnector->base, &aencoder->base);
/* DP and eDP additionally need AUX + MST topology initialization. */
7830 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7831 || connector_type == DRM_MODE_CONNECTOR_eDP)
7832 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7837 aconnector->i2c = NULL;
/*
 * amdgpu_dm_get_encoder_crtc_mask - possible_crtcs bitmask for encoders,
 * sized by the asic's CRTC count (case bodies elided in this excerpt).
 */
7842 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7844 switch (adev->mode_info.num_crtc) {
/*
 * amdgpu_dm_encoder_init - register a TMDS encoder with DRM, set its
 * possible_crtcs mask, and record the link index on success (-1 on error).
 */
7861 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7862 struct amdgpu_encoder *aencoder,
7863 uint32_t link_index)
7865 struct amdgpu_device *adev = drm_to_adev(dev);
7867 int res = drm_encoder_init(dev,
7869 &amdgpu_dm_encoder_funcs,
7870 DRM_MODE_ENCODER_TMDS,
7873 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7876 aencoder->encoder_id = link_index;
7878 aencoder->encoder_id = -1;
7880 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
/*
 * manage_dm_interrupts - enable or disable the per-CRTC interrupt sources
 * (pageflip, vblank, and — when configured — secure-display vline) for
 * @acrtc. Branch structure is partially elided in this excerpt.
 */
7885 static void manage_dm_interrupts(struct amdgpu_device *adev,
7886 struct amdgpu_crtc *acrtc,
7890 * We have no guarantee that the frontend index maps to the same
7891 * backend index - some even map to more than one.
7893 * TODO: Use a different interrupt or check DC itself for the mapping.
7896 amdgpu_display_crtc_idx_to_irq_type(
7901 drm_crtc_vblank_on(&acrtc->base);
7904 &adev->pageflip_irq,
7906 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7913 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7921 &adev->pageflip_irq,
7923 drm_crtc_vblank_off(&acrtc->base);
/*
 * dm_update_pflip_irq_state - re-apply the pageflip IRQ enable state for
 * @acrtc's CRTC index to hardware.
 */
7927 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7928 struct amdgpu_crtc *acrtc)
7931 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7934 * This reads the current state for the IRQ and force reapplies
7935 * the setting to hardware.
7937 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
/*
 * is_scaling_state_different - true when scaling mode or effective
 * underscan configuration changed between old and new connector state.
 * Underscan toggles only count if the relevant borders are non-zero.
 */
7941 is_scaling_state_different(const struct dm_connector_state *dm_state,
7942 const struct dm_connector_state *old_dm_state)
7944 if (dm_state->scaling != old_dm_state->scaling)
7946 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7947 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7949 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7950 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7952 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7953 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7958 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * is_content_protection_different - decide whether the HDCP state machine
 * must act on a connector-state transition, normalizing transient states
 * (re-enable, S3 resume) along the way. May rewrite state->content_protection.
 */
7959 static bool is_content_protection_different(struct drm_connector_state *state,
7960 const struct drm_connector_state *old_state,
7961 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7963 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7964 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7966 /* Handle: Type0/1 change */
7967 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7968 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7969 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7973 /* CP is being re enabled, ignore this
7975 * Handles: ENABLED -> DESIRED
7977 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7978 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7979 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7983 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7985 * Handles: UNDESIRED -> ENABLED
7987 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7988 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7989 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7991 /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7992 * hot-plug, headless s3, dpms
7994 * Handles: DESIRED -> DESIRED (Special case)
7996 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7997 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7998 dm_con_state->update_hdcp = false;
8003 * Handles: UNDESIRED -> UNDESIRED
8004 * DESIRED -> DESIRED
8005 * ENABLED -> ENABLED
8007 if (old_state->content_protection == state->content_protection)
8011 * Handles: UNDESIRED -> DESIRED
8012 * DESIRED -> UNDESIRED
8013 * ENABLED -> UNDESIRED
8015 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8019 * Handles: DESIRED -> ENABLED
/*
 * remove_stream - detach a DC stream from the CRTC during a modeset
 * update: clear the OTG binding and mark the CRTC disabled.
 */
8025 static void remove_stream(struct amdgpu_device *adev,
8026 struct amdgpu_crtc *acrtc,
8027 struct dc_stream_state *stream)
8029 /* this is the update mode case */
8031 acrtc->otg_inst = -1;
8032 acrtc->enabled = false;
/*
 * get_cursor_position - translate the cursor plane's DRM state into a DC
 * cursor position, clamping negative coordinates to 0 and recording the
 * clipped amount as the hotspot. Rejects oversized cursors.
 */
8035 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8036 struct dc_cursor_position *position)
8038 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8040 int xorigin = 0, yorigin = 0;
8042 if (!crtc || !plane->state->fb)
8045 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8046 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8047 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8049 plane->state->crtc_w,
8050 plane->state->crtc_h);
8054 x = plane->state->crtc_x;
8055 y = plane->state->crtc_y;
/* Entirely off-screen to the top/left: nothing to show. */
8057 if (x <= -amdgpu_crtc->max_cursor_width ||
8058 y <= -amdgpu_crtc->max_cursor_height)
/* Partially off-screen: shift the hotspot instead of the position. */
8062 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8066 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8069 position->enable = true;
8070 position->translate_by_source = true;
8073 position->x_hotspot = xorigin;
8074 position->y_hotspot = yorigin;
/*
 * handle_cursor_update - program the hardware cursor from the committed
 * plane state: compute the position, and either disable the cursor
 * (position off-screen / no fb) or push attributes + position to the DC
 * stream under the dc_lock.
 */
8079 static void handle_cursor_update(struct drm_plane *plane,
8080 struct drm_plane_state *old_plane_state)
8082 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8083 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
/* Fall back to the old CRTC when the cursor is being disabled (no new fb). */
8084 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8085 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8086 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8087 uint64_t address = afb ? afb->address : 0;
8088 struct dc_cursor_position position = {0};
8089 struct dc_cursor_attributes attributes;
8092 if (!plane->state->fb && !old_plane_state->fb)
8095 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8097 amdgpu_crtc->crtc_id,
8098 plane->state->crtc_w,
8099 plane->state->crtc_h);
8101 ret = get_cursor_position(plane, crtc, &position);
8105 if (!position.enable) {
8106 /* turn off cursor */
8107 if (crtc_state && crtc_state->stream) {
8108 mutex_lock(&adev->dm.dc_lock);
8109 dc_stream_set_cursor_position(crtc_state->stream,
8111 mutex_unlock(&adev->dm.dc_lock);
8116 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8117 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8119 memset(&attributes, 0, sizeof(attributes));
8120 attributes.address.high_part = upper_32_bits(address);
8121 attributes.address.low_part = lower_32_bits(address);
8122 attributes.width = plane->state->crtc_w;
8123 attributes.height = plane->state->crtc_h;
8124 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8125 attributes.rotation_angle = 0;
8126 attributes.attribute_flags.value = 0;
/* Pitch in pixels: byte pitch divided by bytes-per-pixel. */
8128 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8130 if (crtc_state->stream) {
8131 mutex_lock(&adev->dm.dc_lock);
8132 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8134 DRM_ERROR("DC failed to set cursor attributes\n");
8136 if (!dc_stream_set_cursor_position(crtc_state->stream,
8138 DRM_ERROR("DC failed to set cursor position\n");
8139 mutex_unlock(&adev->dm.dc_lock);
/*
 * prepare_flip_isr - hand the pending pageflip event over to the pflip
 * IRQ handler. Caller must hold the device event_lock (asserted below).
 */
8143 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8146 assert_spin_locked(&acrtc->base.dev->event_lock);
8147 WARN_ON(acrtc->event);
8149 acrtc->event = acrtc->base.state->event;
8151 /* Set the flip status */
8152 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8154 /* Mark this event as consumed */
8155 acrtc->base.state->event = NULL;
8157 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
/*
 * update_freesync_state_on_stream - per-flip VRR maintenance: feed the
 * flip into the freesync module, adjust vmin/vmax on pre-AI asics, build
 * the VRR infopacket, and record what changed for later IRQ handling.
 * Runs under the device event_lock.
 */
8161 static void update_freesync_state_on_stream(
8162 struct amdgpu_display_manager *dm,
8163 struct dm_crtc_state *new_crtc_state,
8164 struct dc_stream_state *new_stream,
8165 struct dc_plane_state *surface,
8166 u32 flip_timestamp_in_us)
8168 struct mod_vrr_params vrr_params;
8169 struct dc_info_packet vrr_infopacket = {0};
8170 struct amdgpu_device *adev = dm->adev;
8171 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8172 unsigned long flags;
8173 bool pack_sdp_v1_3 = false;
8179 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8180 * For now it's sufficient to just guard against these conditions.
8183 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8186 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8187 vrr_params = acrtc->dm_irq_params.vrr_params;
8190 mod_freesync_handle_preflip(
8191 dm->freesync_module,
8194 flip_timestamp_in_us,
/* Pre-AI hardware needs a manual v_update + vmin/vmax reprogram per flip. */
8197 if (adev->family < AMDGPU_FAMILY_AI &&
8198 amdgpu_dm_vrr_active(new_crtc_state)) {
8199 mod_freesync_handle_v_update(dm->freesync_module,
8200 new_stream, &vrr_params);
8202 /* Need to call this before the frame ends. */
8203 dc_stream_adjust_vmin_vmax(dm->dc,
8204 new_crtc_state->stream,
8205 &vrr_params.adjust);
8209 mod_freesync_build_vrr_infopacket(
8210 dm->freesync_module,
8214 TRANSFER_FUNC_UNKNOWN,
/* Track changes so the vupdate IRQ handler knows what to re-send. */
8218 new_crtc_state->freesync_timing_changed |=
8219 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8221 sizeof(vrr_params.adjust)) != 0);
8223 new_crtc_state->freesync_vrr_info_changed |=
8224 (memcmp(&new_crtc_state->vrr_infopacket,
8226 sizeof(vrr_infopacket)) != 0);
8228 acrtc->dm_irq_params.vrr_params = vrr_params;
8229 new_crtc_state->vrr_infopacket = vrr_infopacket;
8231 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8232 new_stream->vrr_infopacket = vrr_infopacket;
8234 if (new_crtc_state->freesync_vrr_info_changed)
8235 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8236 new_crtc_state->base.crtc->base.id,
8237 (int)new_crtc_state->base.vrr_enabled,
8238 (int)vrr_params.state);
8240 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * update_stream_irq_parameters - refresh per-CRTC FreeSync/VRR config
 * consumed by the DM IRQ handlers.
 *
 * Derives the effective VRR state (fixed / variable / unsupported) from
 * @new_crtc_state's freesync_config, rebuilds mod_vrr_params, and copies
 * the config, active plane count and VRR params into acrtc->dm_irq_params
 * under the device event_lock.
 */
8243 static void update_stream_irq_parameters(
8244 struct amdgpu_display_manager *dm,
8245 struct dm_crtc_state *new_crtc_state)
8247 struct dc_stream_state *new_stream = new_crtc_state->stream;
8248 struct mod_vrr_params vrr_params;
8249 struct mod_freesync_config config = new_crtc_state->freesync_config;
8250 struct amdgpu_device *adev = dm->adev;
8251 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8252 unsigned long flags;
8258 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8259 * For now it's sufficient to just guard against these conditions.
/* Bail out on degenerate timings (see TODO above). */
8261 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8264 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8265 vrr_params = acrtc->dm_irq_params.vrr_params;
/* VRR is only usable when the sink advertised a valid refresh range. */
8267 if (new_crtc_state->vrr_supported &&
8268 config.min_refresh_in_uhz &&
8269 config.max_refresh_in_uhz) {
8271 * if freesync compatible mode was set, config.state will be set
/* Fixed-rate ("freesync compatible") path: keep ACTIVE_FIXED when no
 * modeset is pending or the previous state was already fixed.
 */
8274 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8275 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8276 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8277 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8278 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8279 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8280 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
/* Otherwise follow the userspace vrr_enabled property. */
8282 config.state = new_crtc_state->base.vrr_enabled ?
8283 VRR_STATE_ACTIVE_VARIABLE :
8287 config.state = VRR_STATE_UNSUPPORTED;
8290 mod_freesync_build_vrr_params(dm->freesync_module,
8292 &config, &vrr_params);
8294 new_crtc_state->freesync_timing_changed |=
8295 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8296 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8298 new_crtc_state->freesync_config = config;
8299 /* Copy state for access from DM IRQ handler */
8300 acrtc->dm_irq_params.freesync_config = config;
8301 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8302 acrtc->dm_irq_params.vrr_params = vrr_params;
8303 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * amdgpu_dm_handle_vrr_transition - react to a CRTC's VRR on/off change.
 *
 * On an inactive -> active transition, take a vblank reference and enable
 * the vupdate IRQ; on active -> inactive, release both.  The rationale is
 * preserved in the original inline comments below.
 */
8306 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8307 struct dm_crtc_state *new_state)
8309 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8310 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8312 if (!old_vrr_active && new_vrr_active) {
8313 /* Transition VRR inactive -> active:
8314 * While VRR is active, we must not disable vblank irq, as a
8315 * reenable after disable would compute bogus vblank/pflip
8316 * timestamps if it likely happened inside display front-porch.
8318 * We also need vupdate irq for the actual core vblank handling
8321 dm_set_vupdate_irq(new_state->base.crtc, true);
8322 drm_crtc_vblank_get(new_state->base.crtc);
8323 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8324 __func__, new_state->base.crtc->base.id);
8325 } else if (old_vrr_active && !new_vrr_active) {
8326 /* Transition VRR active -> inactive:
8327 * Allow vblank irq disable again for fixed refresh rate.
8329 dm_set_vupdate_irq(new_state->base.crtc, false);
8330 drm_crtc_vblank_put(new_state->base.crtc);
8331 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8332 __func__, new_state->base.crtc->base.id);
/*
 * amdgpu_dm_commit_cursors - re-issue cursor programming for every cursor
 * plane in @state by replaying handle_cursor_update() on its old state
 * (see the TODO below about per-stream granularity).
 */
8336 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8338 struct drm_plane *plane;
8339 struct drm_plane_state *old_plane_state;
8343 * TODO: Make this per-stream so we don't issue redundant updates for
8344 * commits with multiple streams.
8346 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8347 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8348 handle_cursor_update(plane, old_plane_state);
/*
 * amdgpu_dm_commit_planes - program all non-cursor plane updates for one
 * CRTC (@pcrtc) through DC.
 *
 * Builds a dc_surface_update bundle (scaling info, plane info, flip
 * address) per plane, waits on framebuffer fences, throttles flips against
 * the target vblank, arms the pageflip IRQ, attaches stream-level updates
 * (mode src/dst, color management, ABM, VRR infopacket), commits the
 * bundle under dm->dc_lock while managing PSR, and finally re-commits
 * cursors for still-active planes.
 *
 * NOTE(review): this excerpt has many elided lines (the bundle variable
 * declaration, continue/return statements, some call arguments and closing
 * braces); comments below describe only the visible code.
 */
8351 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8352 struct dc_state *dc_state,
8353 struct drm_device *dev,
8354 struct amdgpu_display_manager *dm,
8355 struct drm_crtc *pcrtc,
8356 bool wait_for_vblank)
8359 uint64_t timestamp_ns;
8360 struct drm_plane *plane;
8361 struct drm_plane_state *old_plane_state, *new_plane_state;
8362 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8363 struct drm_crtc_state *new_pcrtc_state =
8364 drm_atomic_get_new_crtc_state(state, pcrtc);
8365 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8366 struct dm_crtc_state *dm_old_crtc_state =
8367 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8368 int planes_count = 0, vpos, hpos;
8370 unsigned long flags;
8371 struct amdgpu_bo *abo;
8372 uint32_t target_vblank, last_flip_vblank;
8373 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8374 bool pflip_present = false;
/* Per-plane scratch arrays gathered into one heap allocation ("bundle")
 * below; the struct definition itself is elided in this excerpt.
 */
8376 struct dc_surface_update surface_updates[MAX_SURFACES];
8377 struct dc_plane_info plane_infos[MAX_SURFACES];
8378 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8379 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8380 struct dc_stream_update stream_update;
8383 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8386 dm_error("Failed to allocate update bundle\n");
8391 * Disable the cursor first if we're disabling all the planes.
8392 * It'll remain on the screen after the planes are re-enabled
8395 if (acrtc_state->active_planes == 0)
8396 amdgpu_dm_commit_cursors(state);
8398 /* update planes when needed */
8399 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8400 struct drm_crtc *crtc = new_plane_state->crtc;
8401 struct drm_crtc_state *new_crtc_state;
8402 struct drm_framebuffer *fb = new_plane_state->fb;
8403 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8404 bool plane_needs_flip;
8405 struct dc_plane_state *dc_plane;
8406 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8408 /* Cursor plane is handled after stream updates */
8409 if (plane->type == DRM_PLANE_TYPE_CURSOR)
/* Skip planes that are not attached (with a framebuffer) to this CRTC. */
8412 if (!fb || !crtc || pcrtc != crtc)
8415 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8416 if (!new_crtc_state->active)
8419 dc_plane = dm_new_plane_state->dc_state;
8421 bundle->surface_updates[planes_count].surface = dc_plane;
8422 if (new_pcrtc_state->color_mgmt_changed) {
8423 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8424 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8425 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8428 fill_dc_scaling_info(new_plane_state,
8429 &bundle->scaling_infos[planes_count]);
8431 bundle->surface_updates[planes_count].scaling_info =
8432 &bundle->scaling_infos[planes_count];
/* A flip is needed only when both old and new states carry an FB. */
8434 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8436 pflip_present = pflip_present || plane_needs_flip;
8438 if (!plane_needs_flip) {
8443 abo = gem_to_amdgpu_bo(fb->obj[0]);
8446 * Wait for all fences on this FB. Do limited wait to avoid
8447 * deadlock during GPU reset when this fence will not signal
8448 * but we hold reservation lock for the BO.
8450 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8452 msecs_to_jiffies(5000));
8453 if (unlikely(r <= 0))
8454 DRM_ERROR("Waiting for fences timed out!");
8456 fill_dc_plane_info_and_addr(
8457 dm->adev, new_plane_state,
8459 &bundle->plane_infos[planes_count],
8460 &bundle->flip_addrs[planes_count].address,
8461 afb->tmz_surface, false);
8463 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8464 new_plane_state->plane->index,
8465 bundle->plane_infos[planes_count].dcc.enable);
8467 bundle->surface_updates[planes_count].plane_info =
8468 &bundle->plane_infos[planes_count];
8471 * Only allow immediate flips for fast updates that don't
8472 * change FB pitch, DCC state, rotation or mirroring.
8474 bundle->flip_addrs[planes_count].flip_immediate =
8475 crtc->state->async_flip &&
8476 acrtc_state->update_type == UPDATE_TYPE_FAST;
8478 timestamp_ns = ktime_get_ns();
8479 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8480 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8481 bundle->surface_updates[planes_count].surface = dc_plane;
8483 if (!bundle->surface_updates[planes_count].surface) {
8484 DRM_ERROR("No surface for CRTC: id=%d\n",
8485 acrtc_attach->crtc_id);
/* VRR state is only tracked against the primary plane's flips. */
8489 if (plane == pcrtc->primary)
8490 update_freesync_state_on_stream(
8493 acrtc_state->stream,
8495 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8497 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8499 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8500 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
/* Pick the reference vblank count used for flip throttling below. */
8506 if (pflip_present) {
8508 /* Use old throttling in non-vrr fixed refresh rate mode
8509 * to keep flip scheduling based on target vblank counts
8510 * working in a backwards compatible way, e.g., for
8511 * clients using the GLX_OML_sync_control extension or
8512 * DRI3/Present extension with defined target_msc.
8514 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8517 /* For variable refresh rate mode only:
8518 * Get vblank of last completed flip to avoid > 1 vrr
8519 * flips per video frame by use of throttling, but allow
8520 * flip programming anywhere in the possibly large
8521 * variable vrr vblank interval for fine-grained flip
8522 * timing control and more opportunity to avoid stutter
8523 * on late submission of flips.
8525 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8526 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8527 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8530 target_vblank = last_flip_vblank + wait_for_vblank;
8533 * Wait until we're out of the vertical blank period before the one
8534 * targeted by the flip
8536 while ((acrtc_attach->enabled &&
8537 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8538 0, &vpos, &hpos, NULL,
8539 NULL, &pcrtc->hwmode)
8540 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8541 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8542 (int)(target_vblank -
8543 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8544 usleep_range(1000, 1100);
8548 * Prepare the flip event for the pageflip interrupt to handle.
8550 * This only works in the case where we've already turned on the
8551 * appropriate hardware blocks (eg. HUBP) so in the transition case
8552 * from 0 -> n planes we have to skip a hardware generated event
8553 * and rely on sending it from software.
8555 if (acrtc_attach->base.state->event &&
8556 acrtc_state->active_planes > 0) {
8557 drm_crtc_vblank_get(pcrtc);
8559 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8561 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8562 prepare_flip_isr(acrtc_attach);
8564 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
/* Attach an updated VRR infopacket to the stream update if it changed. */
8567 if (acrtc_state->stream) {
8568 if (acrtc_state->freesync_vrr_info_changed)
8569 bundle->stream_update.vrr_infopacket =
8570 &acrtc_state->stream->vrr_infopacket;
8574 /* Update the planes if changed or disable if we don't have any. */
8575 if ((planes_count || acrtc_state->active_planes == 0) &&
8576 acrtc_state->stream) {
8577 bundle->stream_update.stream = acrtc_state->stream;
8578 if (new_pcrtc_state->mode_changed) {
8579 bundle->stream_update.src = acrtc_state->stream->src;
8580 bundle->stream_update.dst = acrtc_state->stream->dst;
8583 if (new_pcrtc_state->color_mgmt_changed) {
8585 * TODO: This isn't fully correct since we've actually
8586 * already modified the stream in place.
8588 bundle->stream_update.gamut_remap =
8589 &acrtc_state->stream->gamut_remap_matrix;
8590 bundle->stream_update.output_csc_transform =
8591 &acrtc_state->stream->csc_color_matrix;
8592 bundle->stream_update.out_transfer_func =
8593 acrtc_state->stream->out_transfer_func;
8596 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8597 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8598 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8601 * If FreeSync state on the stream has changed then we need to
8602 * re-adjust the min/max bounds now that DC doesn't handle this
8603 * as part of commit.
8605 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8606 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8607 dc_stream_adjust_vmin_vmax(
8608 dm->dc, acrtc_state->stream,
8609 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8610 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
/* Disable PSR before a non-fast update, then commit the bundle. */
8612 mutex_lock(&dm->dc_lock);
8613 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8614 acrtc_state->stream->link->psr_settings.psr_allow_active)
8615 amdgpu_dm_psr_disable(acrtc_state->stream);
8617 dc_commit_updates_for_stream(dm->dc,
8618 bundle->surface_updates,
8620 acrtc_state->stream,
8621 &bundle->stream_update,
8625 * Enable or disable the interrupts on the backend.
8627 * Most pipes are put into power gating when unused.
8629 * When power gating is enabled on a pipe we lose the
8630 * interrupt enablement state when power gating is disabled.
8632 * So we need to update the IRQ control state in hardware
8633 * whenever the pipe turns on (since it could be previously
8634 * power gated) or off (since some pipes can't be power gated
8637 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8638 dm_update_pflip_irq_state(drm_to_adev(dev),
/* Set up PSR on a slow update when supported but not yet enabled;
 * (re)enable PSR on fast updates when it is ready but inactive.
 */
8641 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8642 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8643 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8644 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8645 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8646 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8647 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8648 amdgpu_dm_psr_enable(acrtc_state->stream);
8651 mutex_unlock(&dm->dc_lock);
8655 * Update cursor state *after* programming all the planes.
8656 * This avoids redundant programming in the case where we're going
8657 * to be disabling a single plane - those pipes are being disabled.
8659 if (acrtc_state->active_planes)
8660 amdgpu_dm_commit_cursors(state);
/*
 * amdgpu_dm_commit_audio - propagate audio endpoint changes after a commit.
 *
 * First pass: for connectors whose CRTC changed/went away through a
 * modeset, invalidate the cached audio instance (-1) and notify the audio
 * layer of the removal.  Second pass: for connectors gaining a stream,
 * cache the audio instance reported by DC and notify the addition.
 * aconnector->audio_inst is always updated under adev->dm.audio_lock.
 */
8666 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8667 struct drm_atomic_state *state)
8669 struct amdgpu_device *adev = drm_to_adev(dev);
8670 struct amdgpu_dm_connector *aconnector;
8671 struct drm_connector *connector;
8672 struct drm_connector_state *old_con_state, *new_con_state;
8673 struct drm_crtc_state *new_crtc_state;
8674 struct dm_crtc_state *new_dm_crtc_state;
8675 const struct dc_stream_status *status;
8678 /* Notify device removals. */
8679 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8680 if (old_con_state->crtc != new_con_state->crtc) {
8681 /* CRTC changes require notification. */
8685 if (!new_con_state->crtc)
8688 new_crtc_state = drm_atomic_get_new_crtc_state(
8689 state, new_con_state->crtc);
8691 if (!new_crtc_state)
8694 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8698 aconnector = to_amdgpu_dm_connector(connector);
/* Drop the cached instance before notifying so readers see "gone". */
8700 mutex_lock(&adev->dm.audio_lock);
8701 inst = aconnector->audio_inst;
8702 aconnector->audio_inst = -1;
8703 mutex_unlock(&adev->dm.audio_lock);
8705 amdgpu_dm_audio_eld_notify(adev, inst);
8708 /* Notify audio device additions. */
8709 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8710 if (!new_con_state->crtc)
8713 new_crtc_state = drm_atomic_get_new_crtc_state(
8714 state, new_con_state->crtc);
8716 if (!new_crtc_state)
8719 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8722 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8723 if (!new_dm_crtc_state->stream)
8726 status = dc_stream_get_status(new_dm_crtc_state->stream);
8730 aconnector = to_amdgpu_dm_connector(connector);
/* Cache the audio instance DC assigned to this stream. */
8732 mutex_lock(&adev->dm.audio_lock);
8733 inst = status->audio_inst;
8734 aconnector->audio_inst = inst;
8735 mutex_unlock(&adev->dm.audio_lock);
8737 amdgpu_dm_audio_eld_notify(adev, inst);
8742 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8743 * @crtc_state: the DRM CRTC state
8744 * @stream_state: the DC stream state.
8746 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8747 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8749 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8750 struct dc_stream_state *stream_state)
/* Only mode_changed is mirrored in this excerpt; it is derived from the
 * DRM helper rather than copied verbatim from a single flag.
 */
8752 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8756 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8757 * @state: The atomic state to commit
8759 * This will tell DC to commit the constructed DC state from atomic_check,
8760 * programming the hardware. Any failures here implies a hardware failure, since
8761 * atomic check should have filtered anything non-kosher.
/* NOTE(review): many lines (braces, continue statements, some arguments)
 * are elided in this excerpt; section comments below describe the visible
 * code only.
 */
8763 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8765 struct drm_device *dev = state->dev;
8766 struct amdgpu_device *adev = drm_to_adev(dev);
8767 struct amdgpu_display_manager *dm = &adev->dm;
8768 struct dm_atomic_state *dm_state;
8769 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8771 struct drm_crtc *crtc;
8772 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8773 unsigned long flags;
8774 bool wait_for_vblank = true;
8775 struct drm_connector *connector;
8776 struct drm_connector_state *old_con_state, *new_con_state;
8777 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8778 int crtc_disable_count = 0;
8779 bool mode_set_reset_required = false;
8781 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8783 drm_atomic_helper_update_legacy_modeset_state(dev, state);
/* Use the DC context built by atomic_check, or clone the current one
 * when this commit carries no DM state change.
 */
8785 dm_state = dm_atomic_get_new_state(state);
8786 if (dm_state && dm_state->context) {
8787 dc_state = dm_state->context;
8789 /* No state changes, retain current state. */
8790 dc_state_temp = dc_create_state(dm->dc);
8791 ASSERT(dc_state_temp);
8792 dc_state = dc_state_temp;
8793 dc_resource_state_copy_construct_current(dm->dc, dc_state);
/* Pass 1: quiesce CRTCs that are being disabled or fully modeset —
 * mask their interrupts and drop the old stream reference.
 */
8796 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8797 new_crtc_state, i) {
8798 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8800 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8802 if (old_crtc_state->active &&
8803 (!new_crtc_state->active ||
8804 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8805 manage_dm_interrupts(adev, acrtc, false);
8806 dc_stream_release(dm_old_crtc_state->stream);
8810 drm_atomic_helper_calc_timestamping_constants(state);
8812 /* update changed items */
8813 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8814 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8816 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8817 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8820 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8821 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8822 "connectors_changed:%d\n",
8824 new_crtc_state->enable,
8825 new_crtc_state->active,
8826 new_crtc_state->planes_changed,
8827 new_crtc_state->mode_changed,
8828 new_crtc_state->active_changed,
8829 new_crtc_state->connectors_changed);
8831 /* Disable cursor if disabling crtc */
8832 if (old_crtc_state->active && !new_crtc_state->active) {
8833 struct dc_cursor_position position;
8835 memset(&position, 0, sizeof(position));
8836 mutex_lock(&dm->dc_lock);
8837 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8838 mutex_unlock(&dm->dc_lock);
8841 /* Copy all transient state flags into dc state */
8842 if (dm_new_crtc_state->stream) {
8843 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8844 dm_new_crtc_state->stream);
8847 /* handles headless hotplug case, updating new_state and
8848 * aconnector as needed
8851 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8853 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8855 if (!dm_new_crtc_state->stream) {
8857 * this could happen because of issues with
8858 * userspace notifications delivery.
8859 * In this case userspace tries to set mode on
8860 * display which is disconnected in fact.
8861 * dc_sink is NULL in this case on aconnector.
8862 * We expect reset mode will come soon.
8864 * This can also happen when unplug is done
8865 * during resume sequence ended
8867 * In this case, we want to pretend we still
8868 * have a sink to keep the pipe running so that
8869 * hw state is consistent with the sw state
8871 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8872 __func__, acrtc->base.base.id);
8876 if (dm_old_crtc_state->stream)
8877 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
/* Hold a runtime-PM reference while this CRTC drives a display;
 * the matching put happens in the disable-count loop at the end.
 */
8879 pm_runtime_get_noresume(dev->dev);
8881 acrtc->enabled = true;
8882 acrtc->hw_mode = new_crtc_state->mode;
8883 crtc->hwmode = new_crtc_state->mode;
8884 mode_set_reset_required = true;
8885 } else if (modereset_required(new_crtc_state)) {
8886 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8887 /* i.e. reset mode */
8888 if (dm_old_crtc_state->stream)
8889 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8891 mode_set_reset_required = true;
8893 } /* for_each_crtc_in_state() */
8896 /* if there mode set or reset, disable eDP PSR */
8897 if (mode_set_reset_required)
8898 amdgpu_dm_psr_disable_all(dm);
/* Program the new global DC state into hardware under dc_lock. */
8900 dm_enable_per_frame_crtc_master_sync(dc_state);
8901 mutex_lock(&dm->dc_lock);
8902 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8903 #if defined(CONFIG_DRM_AMD_DC_DCN)
8904 /* Allow idle optimization when vblank count is 0 for display off */
8905 if (dm->active_vblank_irq_count == 0)
8906 dc_allow_idle_optimizations(dm->dc,true);
8908 mutex_unlock(&dm->dc_lock);
/* Record the OTG instance DC assigned to each active stream. */
8911 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8912 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8914 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8916 if (dm_new_crtc_state->stream != NULL) {
8917 const struct dc_stream_status *status =
8918 dc_stream_get_status(dm_new_crtc_state->stream);
8921 status = dc_stream_get_status_from_state(dc_state,
8922 dm_new_crtc_state->stream);
8924 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8926 acrtc->otg_inst = status->primary_otg_inst;
/* HDCP: reset protection on connectors that lost their stream, and
 * re-evaluate it where the desired content protection changed.
 */
8929 #ifdef CONFIG_DRM_AMD_DC_HDCP
8930 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8931 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8932 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8933 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8935 new_crtc_state = NULL;
8938 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8940 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8942 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8943 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8944 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8945 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8946 dm_new_con_state->update_hdcp = true;
8950 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8951 hdcp_update_display(
8952 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8953 new_con_state->hdcp_content_type,
8954 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8958 /* Handle connector state changes */
8959 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8960 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8961 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8962 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8963 struct dc_surface_update dummy_updates[MAX_SURFACES];
8964 struct dc_stream_update stream_update;
8965 struct dc_info_packet hdr_packet;
8966 struct dc_stream_status *status = NULL;
8967 bool abm_changed, hdr_changed, scaling_changed;
8969 memset(&dummy_updates, 0, sizeof(dummy_updates));
8970 memset(&stream_update, 0, sizeof(stream_update));
8973 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8974 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8977 /* Skip any modesets/resets */
8978 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8981 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8982 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8984 scaling_changed = is_scaling_state_different(dm_new_con_state,
8987 abm_changed = dm_new_crtc_state->abm_level !=
8988 dm_old_crtc_state->abm_level;
8991 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8993 if (!scaling_changed && !abm_changed && !hdr_changed)
8996 stream_update.stream = dm_new_crtc_state->stream;
8997 if (scaling_changed) {
8998 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8999 dm_new_con_state, dm_new_crtc_state->stream);
9001 stream_update.src = dm_new_crtc_state->stream->src;
9002 stream_update.dst = dm_new_crtc_state->stream->dst;
9006 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9008 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9012 fill_hdr_info_packet(new_con_state, &hdr_packet);
9013 stream_update.hdr_static_metadata = &hdr_packet;
9016 status = dc_stream_get_status(dm_new_crtc_state->stream);
9018 if (WARN_ON(!status))
9021 WARN_ON(!status->plane_count);
9024 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9025 * Here we create an empty update on each plane.
9026 * To fix this, DC should permit updating only stream properties.
9028 for (j = 0; j < status->plane_count; j++)
9029 dummy_updates[j].surface = status->plane_states[0];
9032 mutex_lock(&dm->dc_lock);
9033 dc_commit_updates_for_stream(dm->dc,
9035 status->plane_count,
9036 dm_new_crtc_state->stream,
9039 mutex_unlock(&dm->dc_lock);
9042 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9043 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9044 new_crtc_state, i) {
9045 if (old_crtc_state->active && !new_crtc_state->active)
9046 crtc_disable_count++;
9048 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9049 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9051 /* For freesync config update on crtc state and params for irq */
9052 update_stream_irq_parameters(dm, dm_new_crtc_state);
9054 /* Handle vrr on->off / off->on transitions */
9055 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9060 * Enable interrupts for CRTCs that are newly enabled or went through
9061 * a modeset. It was intentionally deferred until after the front end
9062 * state was modified to wait until the OTG was on and so the IRQ
9063 * handlers didn't access stale or invalid state.
9065 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9066 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9067 #ifdef CONFIG_DEBUG_FS
9068 bool configure_crc = false;
9069 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9070 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9071 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9073 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9074 cur_crc_src = acrtc->dm_irq_params.crc_src;
9075 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9077 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9079 if (new_crtc_state->active &&
9080 (!old_crtc_state->active ||
9081 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/* Give the IRQ path its own stream reference before unmasking. */
9082 dc_stream_retain(dm_new_crtc_state->stream);
9083 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9084 manage_dm_interrupts(adev, acrtc, true);
9086 #ifdef CONFIG_DEBUG_FS
9088 * Frontend may have changed so reapply the CRC capture
9089 * settings for the stream.
9091 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9093 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9094 configure_crc = true;
9095 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9096 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9097 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9098 acrtc->dm_irq_params.crc_window.update_win = true;
9099 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9100 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9101 crc_rd_wrk->crtc = crtc;
9102 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9103 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9109 if (amdgpu_dm_crtc_configure_crc_source(
9110 crtc, dm_new_crtc_state, cur_crc_src))
9111 DRM_DEBUG_DRIVER("Failed to configure crc source");
/* An async flip on any CRTC disables vblank waiting for the commit. */
9116 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9117 if (new_crtc_state->async_flip)
9118 wait_for_vblank = false;
9120 /* update planes when needed per crtc*/
9121 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9122 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9124 if (dm_new_crtc_state->stream)
9125 amdgpu_dm_commit_planes(state, dc_state, dev,
9126 dm, crtc, wait_for_vblank);
9129 /* Update audio instances for each connector. */
9130 amdgpu_dm_commit_audio(dev, state);
9132 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9133 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9134 /* restore the backlight level */
9135 if (dm->backlight_dev)
9136 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9139 * send vblank event on all events not handled in flip and
9140 * mark consumed event for drm_atomic_helper_commit_hw_done
9142 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9143 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9145 if (new_crtc_state->event)
9146 drm_send_event_locked(dev, &new_crtc_state->event->base);
9148 new_crtc_state->event = NULL;
9150 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9152 /* Signal HW programming completion */
9153 drm_atomic_helper_commit_hw_done(state);
9155 if (wait_for_vblank)
9156 drm_atomic_helper_wait_for_flip_done(dev, state);
9158 drm_atomic_helper_cleanup_planes(dev, state);
9160 /* return the stolen vga memory back to VRAM */
9161 if (!adev->mman.keep_stolen_vga_memory)
9162 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9163 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9166 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9167 * so we can put the GPU into runtime suspend if we're not driving any
9170 for (i = 0; i < crtc_disable_count; i++)
9171 pm_runtime_put_autosuspend(dev->dev);
9172 pm_runtime_mark_last_busy(dev->dev);
/* Release the cloned context, if one was created above. */
9175 dc_release_state(dc_state_temp);
/*
 * dm_force_atomic_commit - force a modeset commit on @connector's pipe.
 *
 * Builds a minimal atomic state containing the connector, the CRTC it is
 * currently attached to (via connector->encoder->crtc) and that CRTC's
 * primary plane, flags the CRTC state as mode_changed to force a full
 * restore, and commits it synchronously with drm_atomic_commit().
 */
9179 static int dm_force_atomic_commit(struct drm_connector *connector)
9182 struct drm_device *ddev = connector->dev;
9183 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9184 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9185 struct drm_plane *plane = disconnected_acrtc->base.primary;
9186 struct drm_connector_state *conn_state;
9187 struct drm_crtc_state *crtc_state;
9188 struct drm_plane_state *plane_state;
/* Reuse the caller's acquire context so lock dependencies stay consistent. */
9193 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9195 /* Construct an atomic state to restore previous display setting */
9198 * Attach connectors to drm_atomic_state
9200 conn_state = drm_atomic_get_connector_state(state, connector);
9202 ret = PTR_ERR_OR_ZERO(conn_state);
9206 /* Attach crtc to drm_atomic_state*/
9207 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9209 ret = PTR_ERR_OR_ZERO(crtc_state);
9213 /* force a restore */
9214 crtc_state->mode_changed = true;
9216 /* Attach plane to drm_atomic_state */
9217 plane_state = drm_atomic_get_plane_state(state, plane);
9219 ret = PTR_ERR_OR_ZERO(plane_state);
9223 /* Call commit internally with the state we just constructed */
9224 ret = drm_atomic_commit(state);
/* Drop our reference to the atomic state in all cases. */
9227 drm_atomic_state_put(state);
9229 DRM_ERROR("Restoring old state failed with %i\n", ret);
9235 * This function handles all cases when set mode does not come upon hotplug.
9236 * This includes when a display is unplugged then plugged back into the
9237 * same port and when running without usermode desktop manager support
/*
 * dm_restore_drm_connector_state - re-light a display when userspace will not
 * issue a modeset itself (see the comment above this function).
 */
9239 void dm_restore_drm_connector_state(struct drm_device *dev,
9240 struct drm_connector *connector)
9242 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9243 struct amdgpu_crtc *disconnected_acrtc;
9244 struct dm_crtc_state *acrtc_state;
/* Nothing to restore without a sink, connector state and active encoder. */
9246 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9249 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9250 if (!disconnected_acrtc)
9253 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9254 if (!acrtc_state->stream)
9258 * If the previous sink is not released and different from the current,
9259 * we deduce we are in a state where we can not rely on usermode call
9260 * to turn on the display, so we do it here
9262 if (acrtc_state->stream->sink != aconnector->dc_sink)
9263 dm_force_atomic_commit(&aconnector->base);
9267 * Grabs all modesetting locks to serialize against any blocking commits
9268 * and waits for completion of all non-blocking commits.
9270 static int do_aquire_global_lock(struct drm_device *dev,
9271 struct drm_atomic_state *state)
9273 struct drm_crtc *crtc;
9274 struct drm_crtc_commit *commit;
9278 * Adding all modeset locks to acquire_ctx ensures that when the
9279 * framework releases the context, the extra locks we take here
9280 * are released too.
9282 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
/* For each CRTC, pin the most recent commit (if any) so we can wait on it. */
9286 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9287 spin_lock(&crtc->commit_lock);
9288 commit = list_first_entry_or_null(&crtc->commit_list,
9289 struct drm_crtc_commit, commit_entry);
9291 drm_crtc_commit_get(commit);
9292 spin_unlock(&crtc->commit_lock);
9298 * Make sure all pending HW programming completed and
9301 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9304 ret = wait_for_completion_interruptible_timeout(
9305 &commit->flip_done, 10*HZ);
9308 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9309 "timed out\n", crtc->base.id, crtc->name);
9311 drm_crtc_commit_put(commit);
/* Positive ret (completed) maps to success; only negatives (e.g. interrupted) propagate. */
9314 return ret < 0 ? ret : 0;
/*
 * get_freesync_config_for_crtc - derive the VRR/freesync configuration for a
 * CRTC from its connector's capabilities and the current mode, and store it
 * in new_crtc_state->freesync_config.
 */
9317 static void get_freesync_config_for_crtc(
9318 struct dm_crtc_state *new_crtc_state,
9319 struct dm_connector_state *new_con_state)
9321 struct mod_freesync_config config = {0};
9322 struct amdgpu_dm_connector *aconnector =
9323 to_amdgpu_dm_connector(new_con_state->base.connector);
9324 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9325 int vrefresh = drm_mode_vrefresh(mode);
9326 bool fs_vid_mode = false;
/* VRR is usable only when the sink is freesync-capable and the mode's
 * nominal refresh rate lies within the sink's supported range. */
9328 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9329 vrefresh >= aconnector->min_vfreq &&
9330 vrefresh <= aconnector->max_vfreq;
9332 if (new_crtc_state->vrr_supported) {
9333 new_crtc_state->stream->ignore_msa_timing_param = true;
9334 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
/* min/max refresh range is expressed in micro-Hertz. */
9336 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9337 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9338 config.vsif_supported = true;
/* NOTE(review): the fixed-rate branch below appears to apply when
 * fs_vid_mode is set (freesync video mode already active) - confirm. */
9342 config.state = VRR_STATE_ACTIVE_FIXED;
9343 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9345 } else if (new_crtc_state->base.vrr_enabled) {
9346 config.state = VRR_STATE_ACTIVE_VARIABLE;
9348 config.state = VRR_STATE_INACTIVE;
9352 new_crtc_state->freesync_config = config;
/*
 * reset_freesync_config_for_crtc - clear all VRR/freesync state on the CRTC,
 * used when its stream is removed.
 */
9355 static void reset_freesync_config_for_crtc(
9356 struct dm_crtc_state *new_crtc_state)
9358 new_crtc_state->vrr_supported = false;
9360 memset(&new_crtc_state->vrr_infopacket, 0,
9361 sizeof(new_crtc_state->vrr_infopacket));
/*
 * Returns true when only the vertical front porch differs between the two
 * modes - the case a freesync video-mode refresh change produces.
 */
9365 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9366 struct drm_crtc_state *new_crtc_state)
9368 struct drm_display_mode old_mode, new_mode;
9370 if (!old_crtc_state || !new_crtc_state)
9373 old_mode = old_crtc_state->mode;
9374 new_mode = new_crtc_state->mode;
/*
 * The mixed ==/!= comparisons below are intentional: all horizontal
 * timing must match exactly, while vtotal, vsync_start and vsync_end
 * must all DIFFER, with the vsync pulse width (vsync_end - vsync_start)
 * preserved. That is precisely a vertical-front-porch-only change.
 */
9376 if (old_mode.clock == new_mode.clock &&
9377 old_mode.hdisplay == new_mode.hdisplay &&
9378 old_mode.vdisplay == new_mode.vdisplay &&
9379 old_mode.htotal == new_mode.htotal &&
9380 old_mode.vtotal != new_mode.vtotal &&
9381 old_mode.hsync_start == new_mode.hsync_start &&
9382 old_mode.vsync_start != new_mode.vsync_start &&
9383 old_mode.hsync_end == new_mode.hsync_end &&
9384 old_mode.vsync_end != new_mode.vsync_end &&
9385 old_mode.hskew == new_mode.hskew &&
9386 old_mode.vscan == new_mode.vscan &&
9387 (old_mode.vsync_end - old_mode.vsync_start) ==
9388 (new_mode.vsync_end - new_mode.vsync_start))
/*
 * set_freesync_fixed_config - lock the freesync config to a fixed refresh
 * rate derived from the mode timing:
 *   refresh (uHz) = pixel clock (kHz) * 1000 * 1e6 / (htotal * vtotal)
 */
9394 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9395 uint64_t num, den, res;
9396 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9398 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9400 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9401 den = (unsigned long long)new_crtc_state->mode.htotal *
9402 (unsigned long long)new_crtc_state->mode.vtotal;
/* 64-bit division helper; plain '/' on u64 is not portable in the kernel. */
9404 res = div_u64(num, den);
9405 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
/*
 * dm_update_crtc_state - add or remove the DC stream backing a CRTC in the
 * in-flight atomic state.
 *
 * atomic_check calls this twice per CRTC: first with enable=false to remove
 * streams for changed/disabled CRTCs, then with enable=true to (re)create
 * streams for updated/enabled CRTCs. Sets *lock_and_validation_needed when
 * the change requires DC global validation and the global lock.
 */
9408 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9409 struct drm_atomic_state *state,
9410 struct drm_crtc *crtc,
9411 struct drm_crtc_state *old_crtc_state,
9412 struct drm_crtc_state *new_crtc_state,
9414 bool *lock_and_validation_needed)
9416 struct dm_atomic_state *dm_state = NULL;
9417 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9418 struct dc_stream_state *new_stream;
9422 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9423 * update changed items
9425 struct amdgpu_crtc *acrtc = NULL;
9426 struct amdgpu_dm_connector *aconnector = NULL;
9427 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9428 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9432 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9433 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9434 acrtc = to_amdgpu_crtc(crtc);
9435 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9437 /* TODO This hack should go away */
9438 if (aconnector && enable) {
9439 /* Make sure fake sink is created in plug-in scenario */
9440 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9442 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9445 if (IS_ERR(drm_new_conn_state)) {
9446 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9450 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9451 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9453 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
/* Build (and validate) a candidate DC stream for the new mode/sink. */
9456 new_stream = create_validate_stream_for_sink(aconnector,
9457 &new_crtc_state->mode,
9459 dm_old_crtc_state->stream);
9462 * we can have no stream on ACTION_SET if a display
9463 * was disconnected during S3, in this case it is not an
9464 * error, the OS will be updated after detection, and
9465 * will do the right thing on next atomic commit
9469 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9470 __func__, acrtc->base.base.id);
9476 * TODO: Check VSDB bits to decide whether this should
9477 * be enabled or not.
9479 new_stream->triggered_crtc_reset.enabled =
9480 dm->force_timing_sync;
9482 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9484 ret = fill_hdr_info_packet(drm_new_conn_state,
9485 &new_stream->hdr_static_metadata);
9490 * If we already removed the old stream from the context
9491 * (and set the new stream to NULL) then we can't reuse
9492 * the old stream even if the stream and scaling are unchanged.
9493 * We'll hit the BUG_ON and black screen.
9495 * TODO: Refactor this function to allow this check to work
9496 * in all conditions.
9498 if (amdgpu_freesync_vid_mode &&
9499 dm_new_crtc_state->stream &&
9500 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
/* Skip the full modeset when the stream and scaling are unchanged. */
9503 if (dm_new_crtc_state->stream &&
9504 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9505 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9506 new_crtc_state->mode_changed = false;
9507 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9508 new_crtc_state->mode_changed);
9512 /* mode_changed flag may get updated above, need to check again */
9513 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9517 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9518 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9519 "connectors_changed:%d\n",
9521 new_crtc_state->enable,
9522 new_crtc_state->active,
9523 new_crtc_state->planes_changed,
9524 new_crtc_state->mode_changed,
9525 new_crtc_state->active_changed,
9526 new_crtc_state->connectors_changed);
9528 /* Remove stream for any changed/disabled CRTC */
9531 if (!dm_old_crtc_state->stream)
/* Front-porch-only changes keep the stream and switch to a fixed rate. */
9534 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9535 is_timing_unchanged_for_freesync(new_crtc_state,
9537 new_crtc_state->mode_changed = false;
9539 "Mode change not required for front porch change, "
9540 "setting mode_changed to %d",
9541 new_crtc_state->mode_changed);
9543 set_freesync_fixed_config(dm_new_crtc_state);
9546 } else if (amdgpu_freesync_vid_mode && aconnector &&
9547 is_freesync_video_mode(&new_crtc_state->mode,
9549 set_freesync_fixed_config(dm_new_crtc_state);
9552 ret = dm_atomic_get_state(state, &dm_state);
9556 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9559 /* i.e. reset mode */
9560 if (dc_remove_stream_from_ctx(
9563 dm_old_crtc_state->stream) != DC_OK) {
/* Drop the reference the crtc state held on the old stream. */
9568 dc_stream_release(dm_old_crtc_state->stream);
9569 dm_new_crtc_state->stream = NULL;
9571 reset_freesync_config_for_crtc(dm_new_crtc_state);
9573 *lock_and_validation_needed = true;
9575 } else {/* Add stream for any updated/enabled CRTC */
9577 * Quick fix to prevent NULL pointer on new_stream when
9578 * added MST connectors not found in existing crtc_state in the chained mode
9579 * TODO: need to dig out the root cause of that
9581 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9584 if (modereset_required(new_crtc_state))
9587 if (modeset_required(new_crtc_state, new_stream,
9588 dm_old_crtc_state->stream)) {
9590 WARN_ON(dm_new_crtc_state->stream);
9592 ret = dm_atomic_get_state(state, &dm_state);
9596 dm_new_crtc_state->stream = new_stream;
/* Extra retain: the crtc state now also owns a reference. */
9598 dc_stream_retain(new_stream);
9600 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9603 if (dc_add_stream_to_ctx(
9606 dm_new_crtc_state->stream) != DC_OK) {
9611 *lock_and_validation_needed = true;
9616 /* Release extra reference */
9618 dc_stream_release(new_stream);
9621 * We want to do dc stream updates that do not require a
9622 * full modeset below.
9624 if (!(enable && aconnector && new_crtc_state->active))
9627 * Given above conditions, the dc state cannot be NULL because:
9628 * 1. We're in the process of enabling CRTCs (just been added
9629 * to the dc context, or already is on the context)
9630 * 2. Has a valid connector attached, and
9631 * 3. Is currently active and enabled.
9632 * => The dc stream state currently exists.
9634 BUG_ON(dm_new_crtc_state->stream == NULL);
9636 /* Scaling or underscan settings */
9637 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9638 update_stream_scaling_settings(
9639 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9642 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9645 * Color management settings. We also update color properties
9646 * when a modeset is needed, to ensure it gets reprogrammed.
9648 if (dm_new_crtc_state->base.color_mgmt_changed ||
9649 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9650 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9655 /* Update Freesync settings. */
9656 get_freesync_config_for_crtc(dm_new_crtc_state,
9663 dc_stream_release(new_stream);
/*
 * should_reset_plane - decide whether DC planes must be removed and
 * recreated for this commit instead of updated in place. Any change that
 * can affect z-order, pipe acquisition or bandwidth forces a reset.
 */
9667 static bool should_reset_plane(struct drm_atomic_state *state,
9668 struct drm_plane *plane,
9669 struct drm_plane_state *old_plane_state,
9670 struct drm_plane_state *new_plane_state)
9672 struct drm_plane *other;
9673 struct drm_plane_state *old_other_state, *new_other_state;
9674 struct drm_crtc_state *new_crtc_state;
9678 * TODO: Remove this hack once the checks below are sufficient
9679 * enough to determine when we need to reset all the planes on
9682 if (state->allow_modeset)
9685 /* Exit early if we know that we're adding or removing the plane. */
9686 if (old_plane_state->crtc != new_plane_state->crtc)
9689 /* old crtc == new_crtc == NULL, plane not in context. */
9690 if (!new_plane_state->crtc)
9694 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9696 if (!new_crtc_state)
9699 /* CRTC Degamma changes currently require us to recreate planes. */
9700 if (new_crtc_state->color_mgmt_changed)
9703 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9707 * If there are any new primary or overlay planes being added or
9708 * removed then the z-order can potentially change. To ensure
9709 * correct z-order and pipe acquisition the current DC architecture
9710 * requires us to remove and recreate all existing planes.
9712 * TODO: Come up with a more elegant solution for this.
9714 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9715 struct amdgpu_framebuffer *old_afb, *new_afb;
9716 if (other->type == DRM_PLANE_TYPE_CURSOR)
/* Only consider planes that share a CRTC with the plane under test. */
9719 if (old_other_state->crtc != new_plane_state->crtc &&
9720 new_other_state->crtc != new_plane_state->crtc)
9723 if (old_other_state->crtc != new_other_state->crtc)
9726 /* Src/dst size and scaling updates. */
9727 if (old_other_state->src_w != new_other_state->src_w ||
9728 old_other_state->src_h != new_other_state->src_h ||
9729 old_other_state->crtc_w != new_other_state->crtc_w ||
9730 old_other_state->crtc_h != new_other_state->crtc_h)
9733 /* Rotation / mirroring updates. */
9734 if (old_other_state->rotation != new_other_state->rotation)
9737 /* Blending updates. */
9738 if (old_other_state->pixel_blend_mode !=
9739 new_other_state->pixel_blend_mode)
9742 /* Alpha updates. */
9743 if (old_other_state->alpha != new_other_state->alpha)
9746 /* Colorspace changes. */
9747 if (old_other_state->color_range != new_other_state->color_range ||
9748 old_other_state->color_encoding != new_other_state->color_encoding)
9751 /* Framebuffer checks fall at the end. */
9752 if (!old_other_state->fb || !new_other_state->fb)
9755 /* Pixel format changes can require bandwidth updates. */
9756 if (old_other_state->fb->format != new_other_state->fb->format)
9759 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9760 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9762 /* Tiling and DCC changes also require bandwidth updates. */
9763 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9764 old_afb->base.modifier != new_afb->base.modifier)
/*
 * dm_check_cursor_fb - validate a framebuffer for use on the cursor plane:
 * size within the CRTC's cursor limits, no cropping/scaling, width equal to
 * pitch, and a linear (untiled) layout.
 */
9771 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9772 struct drm_plane_state *new_plane_state,
9773 struct drm_framebuffer *fb)
9775 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9776 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9780 if (fb->width > new_acrtc->max_cursor_width ||
9781 fb->height > new_acrtc->max_cursor_height) {
9782 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9783 new_plane_state->fb->width,
9784 new_plane_state->fb->height);
/* src_w/src_h are 16.16 fixed point, hence the << 16 comparison. */
9787 if (new_plane_state->src_w != fb->width << 16 ||
9788 new_plane_state->src_h != fb->height << 16) {
9789 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9793 /* Pitch in pixels */
9794 pitch = fb->pitches[0] / fb->format->cpp[0];
9796 if (fb->width != pitch) {
9797 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9806 /* FB pitch is supported by cursor plane */
9809 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9813 /* Core DRM takes care of checking FB modifiers, so we only need to
9814 * check tiling flags when the FB doesn't have a modifier. */
9815 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
/* Pre-AI (GFX8 and older) encode tiling differently from GFX9+. */
9816 if (adev->family < AMDGPU_FAMILY_AI) {
9817 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9818 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9819 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9821 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9824 DRM_DEBUG_ATOMIC("Cursor FB not linear");
/*
 * dm_update_plane_state - add or remove the DC plane state backing a DRM
 * plane in the in-flight atomic state.
 *
 * Mirrors dm_update_crtc_state: called once with enable=false to remove
 * changed/disabled planes from the DC context, then with enable=true to
 * create and attach new DC plane states. Cursor planes are validated but
 * never given a DC plane (DC has no dedicated cursor plane).
 */
9832 static int dm_update_plane_state(struct dc *dc,
9833 struct drm_atomic_state *state,
9834 struct drm_plane *plane,
9835 struct drm_plane_state *old_plane_state,
9836 struct drm_plane_state *new_plane_state,
9838 bool *lock_and_validation_needed)
9841 struct dm_atomic_state *dm_state = NULL;
9842 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9843 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9844 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9845 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9846 struct amdgpu_crtc *new_acrtc;
9851 new_plane_crtc = new_plane_state->crtc;
9852 old_plane_crtc = old_plane_state->crtc;
9853 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9854 dm_old_plane_state = to_dm_plane_state(old_plane_state);
/* Cursor planes only get position/FB validation, no DC plane state. */
9856 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9857 if (!enable || !new_plane_crtc ||
9858 drm_atomic_plane_disabling(plane->state, new_plane_state))
9861 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9863 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9864 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9868 if (new_plane_state->fb) {
9869 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9870 new_plane_state->fb);
9878 needs_reset = should_reset_plane(state, plane, old_plane_state,
9881 /* Remove any changed/removed planes */
9886 if (!old_plane_crtc)
9889 old_crtc_state = drm_atomic_get_old_crtc_state(
9890 state, old_plane_crtc);
9891 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9893 if (!dm_old_crtc_state->stream)
9896 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9897 plane->base.id, old_plane_crtc->base.id);
9899 ret = dm_atomic_get_state(state, &dm_state);
9903 if (!dc_remove_plane_from_context(
9905 dm_old_crtc_state->stream,
9906 dm_old_plane_state->dc_state,
9907 dm_state->context)) {
/* Drop the DC plane reference held by the old plane state. */
9913 dc_plane_state_release(dm_old_plane_state->dc_state);
9914 dm_new_plane_state->dc_state = NULL;
9916 *lock_and_validation_needed = true;
9918 } else { /* Add new planes */
9919 struct dc_plane_state *dc_new_plane_state;
9921 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9924 if (!new_plane_crtc)
9927 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9928 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9930 if (!dm_new_crtc_state->stream)
9936 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9940 WARN_ON(dm_new_plane_state->dc_state);
9942 dc_new_plane_state = dc_create_plane_state(dc);
9943 if (!dc_new_plane_state)
9946 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9947 plane->base.id, new_plane_crtc->base.id);
9949 ret = fill_dc_plane_attributes(
9950 drm_to_adev(new_plane_crtc->dev),
9955 dc_plane_state_release(dc_new_plane_state);
9959 ret = dm_atomic_get_state(state, &dm_state);
9961 dc_plane_state_release(dc_new_plane_state);
9966 * Any atomic check errors that occur after this will
9967 * not need a release. The plane state will be attached
9968 * to the stream, and therefore part of the atomic
9969 * state. It'll be released when the atomic state is
9972 if (!dc_add_plane_to_context(
9974 dm_new_crtc_state->stream,
9976 dm_state->context)) {
9978 dc_plane_state_release(dc_new_plane_state);
/* Ownership of the DC plane reference moves to the plane state. */
9982 dm_new_plane_state->dc_state = dc_new_plane_state;
9984 /* Tell DC to do a full surface update every time there
9985 * is a plane change. Inefficient, but works for now.
9987 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9989 *lock_and_validation_needed = true;
/*
 * dm_check_crtc_cursor - reject configurations where the cursor plane's
 * scaling factor differs from the primary plane's, since the HW cursor
 * inherits the underlying pipe's scaling.
 */
9996 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9997 struct drm_crtc *crtc,
9998 struct drm_crtc_state *new_crtc_state)
10000 struct drm_plane_state *new_cursor_state, *new_primary_state;
10001 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10003 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10004 * cursor per pipe but it's going to inherit the scaling and
10005 * positioning from the underlying pipe. Check the cursor plane's
10006 * blending properties match the primary plane's. */
10008 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10009 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10010 if (!new_cursor_state || !new_primary_state ||
10011 !new_cursor_state->fb || !new_primary_state->fb) {
/* Scale factors in thousandths: dst / src, with src in 16.16 fixed point.
 * NOTE(review): if src_w/src_h were ever below one pixel (< 1<<16), the
 * >> 16 would yield 0 and divide by zero - confirm callers rule that out. */
10015 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10016 (new_cursor_state->src_w >> 16);
10017 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10018 (new_cursor_state->src_h >> 16);
10020 primary_scale_w = new_primary_state->crtc_w * 1000 /
10021 (new_primary_state->src_w >> 16);
10022 primary_scale_h = new_primary_state->crtc_h * 1000 /
10023 (new_primary_state->src_h >> 16);
10025 if (cursor_scale_w != primary_scale_w ||
10026 cursor_scale_h != primary_scale_h) {
10027 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10034 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * add_affected_mst_dsc_crtcs - when @crtc needs a modeset, pull every CRTC
 * sharing its MST topology into the atomic state so DSC bandwidth can be
 * recomputed across the whole tree.
 */
10035 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10037 struct drm_connector *connector;
10038 struct drm_connector_state *conn_state;
10039 struct amdgpu_dm_connector *aconnector = NULL;
10041 for_each_new_connector_in_state(state, connector, conn_state, i) {
10042 if (conn_state->crtc != crtc)
10045 aconnector = to_amdgpu_dm_connector(connector);
/* Only MST connectors participate; skip SST and unattached ports. */
10046 if (!aconnector->port || !aconnector->mst_port)
10055 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
/*
 * validate_overlay - when an overlay plane is enabled together with the HW
 * cursor, require that the overlay fully covers the primary plane; the
 * cursor cannot be blended correctly against a partially covering overlay.
 */
10059 static int validate_overlay(struct drm_atomic_state *state)
10062 struct drm_plane *plane;
10063 struct drm_plane_state *new_plane_state;
10064 struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
10066 /* Check if primary plane is contained inside overlay */
10067 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10068 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10069 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10072 overlay_state = new_plane_state;
10077 /* check if we're making changes to the overlay plane */
10078 if (!overlay_state)
10081 /* check if overlay plane is enabled */
10082 if (!overlay_state->crtc)
10085 /* find the primary plane for the CRTC that the overlay is enabled on */
10086 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10087 if (IS_ERR(primary_state))
10088 return PTR_ERR(primary_state);
10090 /* check if primary plane is enabled */
10091 if (!primary_state->crtc)
10094 /* check if cursor plane is enabled */
10095 cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor)
10096 if (IS_ERR(cursor_state))
10097 return PTR_ERR(cursor_state);
/* NOTE(review): 'plane' here still refers to the overlay plane found in
 * the loop above, while cursor_state is the cursor's state - confirm this
 * pairing is intended and not meant to use the cursor plane's old state. */
10099 if (drm_atomic_plane_disabling(plane->state, cursor_state))
10102 /* Perform the bounds check to ensure the overlay plane covers the primary */
10103 if (primary_state->crtc_x < overlay_state->crtc_x ||
10104 primary_state->crtc_y < overlay_state->crtc_y ||
10105 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10106 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10107 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n")
10115 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10116 * @dev: The DRM device
10117 * @state: The atomic state to commit
10119 * Validate that the given atomic state is programmable by DC into hardware.
10120 * This involves constructing a &struct dc_state reflecting the new hardware
10121 * state we wish to commit, then querying DC to see if it is programmable. It's
10122 * important not to modify the existing DC state. Otherwise, atomic_check
10123 * may unexpectedly commit hardware changes.
10125 * When validating the DC state, it's important that the right locks are
10126 * acquired. For full updates case which removes/adds/updates streams on one
10127 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10128 * that any such full update commit will wait for completion of any outstanding
10129 * flip using DRMs synchronization events.
10131 * Note that DM adds the affected connectors for all CRTCs in state, when that
10132 * might not seem necessary. This is because DC stream creation requires the
10133 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10134 * be possible but non-trivial - a possible TODO item.
10136 * Return: 0 on success, or a negative error code if validation failed.
10138 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10139 struct drm_atomic_state *state)
10141 struct amdgpu_device *adev = drm_to_adev(dev);
10142 struct dm_atomic_state *dm_state = NULL;
10143 struct dc *dc = adev->dm.dc;
10144 struct drm_connector *connector;
10145 struct drm_connector_state *old_con_state, *new_con_state;
10146 struct drm_crtc *crtc;
10147 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10148 struct drm_plane *plane;
10149 struct drm_plane_state *old_plane_state, *new_plane_state;
10150 enum dc_status status;
10152 bool lock_and_validation_needed = false;
10153 struct dm_crtc_state *dm_old_crtc_state;
10155 trace_amdgpu_dm_atomic_check_begin(state);
10157 ret = drm_atomic_helper_check_modeset(dev, state);
10161 /* Check connector changes */
10162 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10163 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10164 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10166 /* Skip connectors that are disabled or part of modeset already. */
10167 if (!old_con_state->crtc && !new_con_state->crtc)
10170 if (!new_con_state->crtc)
10173 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10174 if (IS_ERR(new_crtc_state)) {
10175 ret = PTR_ERR(new_crtc_state);
10179 if (dm_old_con_state->abm_level !=
10180 dm_new_con_state->abm_level)
10181 new_crtc_state->connectors_changed = true;
10184 #if defined(CONFIG_DRM_AMD_DC_DCN)
10185 if (dc_resource_is_dsc_encoding_supported(dc)) {
10186 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10187 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10188 ret = add_affected_mst_dsc_crtcs(state, crtc);
10195 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10196 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10198 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10199 !new_crtc_state->color_mgmt_changed &&
10200 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10201 dm_old_crtc_state->dsc_force_changed == false)
10204 if (!new_crtc_state->enable)
10207 ret = drm_atomic_add_affected_connectors(state, crtc);
10211 ret = drm_atomic_add_affected_planes(state, crtc);
10215 if (dm_old_crtc_state->dsc_force_changed)
10216 new_crtc_state->mode_changed = true;
10220 * Add all primary and overlay planes on the CRTC to the state
10221 * whenever a plane is enabled to maintain correct z-ordering
10222 * and to enable fast surface updates.
10224 drm_for_each_crtc(crtc, dev) {
10225 bool modified = false;
10227 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10228 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10231 if (new_plane_state->crtc == crtc ||
10232 old_plane_state->crtc == crtc) {
10241 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10242 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10246 drm_atomic_get_plane_state(state, plane);
10248 if (IS_ERR(new_plane_state)) {
10249 ret = PTR_ERR(new_plane_state);
10255 /* Remove exiting planes if they are modified */
10256 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10257 ret = dm_update_plane_state(dc, state, plane,
10261 &lock_and_validation_needed);
10266 /* Disable all crtcs which require disable */
10267 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10268 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10272 &lock_and_validation_needed);
10277 /* Enable all crtcs which require enable */
10278 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10279 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10283 &lock_and_validation_needed);
10288 ret = validate_overlay(state);
10292 /* Add new/modified planes */
10293 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10294 ret = dm_update_plane_state(dc, state, plane,
10298 &lock_and_validation_needed);
10303 /* Run this here since we want to validate the streams we created */
10304 ret = drm_atomic_helper_check_planes(dev, state);
10308 /* Check cursor planes scaling */
10309 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10310 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10315 if (state->legacy_cursor_update) {
10317 * This is a fast cursor update coming from the plane update
10318 * helper, check if it can be done asynchronously for better
10321 state->async_update =
10322 !drm_atomic_helper_async_check(dev, state);
10325 * Skip the remaining global validation if this is an async
10326 * update. Cursor updates can be done without affecting
10327 * state or bandwidth calcs and this avoids the performance
10328 * penalty of locking the private state object and
10329 * allocating a new dc_state.
10331 if (state->async_update)
10335 /* Check scaling and underscan changes*/
10336 /* TODO Removed scaling changes validation due to inability to commit
10337 * new stream into context w\o causing full reset. Need to
10338 * decide how to handle.
10340 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10341 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10342 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10343 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10345 /* Skip any modesets/resets */
10346 if (!acrtc || drm_atomic_crtc_needs_modeset(
10347 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10350 /* Skip any thing not scale or underscan changes */
10351 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10354 lock_and_validation_needed = true;
10358 * Streams and planes are reset when there are changes that affect
10359 * bandwidth. Anything that affects bandwidth needs to go through
10360 * DC global validation to ensure that the configuration can be applied
10363 * We have to currently stall out here in atomic_check for outstanding
10364 * commits to finish in this case because our IRQ handlers reference
10365 * DRM state directly - we can end up disabling interrupts too early
10368 * TODO: Remove this stall and drop DM state private objects.
10370 if (lock_and_validation_needed) {
10371 ret = dm_atomic_get_state(state, &dm_state);
10375 ret = do_aquire_global_lock(dev, state);
10379 #if defined(CONFIG_DRM_AMD_DC_DCN)
10380 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10383 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10389 * Perform validation of MST topology in the state:
10390 * We need to perform MST atomic check before calling
10391 * dc_validate_global_state(), or there is a chance
10392 * to get stuck in an infinite loop and hang eventually.
10394 ret = drm_dp_mst_atomic_check(state);
10397 status = dc_validate_global_state(dc, dm_state->context, false);
10398 if (status != DC_OK) {
10399 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10400 dc_status_to_str(status), status);
10406 * The commit is a fast update. Fast updates shouldn't change
10407 * the DC context, affect global validation, and can have their
10408 * commit work done in parallel with other commits not touching
10409 * the same resource. If we have a new DC context as part of
10410 * the DM atomic state from validation we need to free it and
10411 * retain the existing one instead.
10413 * Furthermore, since the DM atomic state only contains the DC
10414 * context and can safely be annulled, we can free the state
10415 * and clear the associated private object now to free
10416 * some memory and avoid a possible use-after-free later.
10419 for (i = 0; i < state->num_private_objs; i++) {
10420 struct drm_private_obj *obj = state->private_objs[i].ptr;
10422 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10423 int j = state->num_private_objs-1;
10425 dm_atomic_destroy_state(obj,
10426 state->private_objs[i].state);
10428 /* If i is not at the end of the array then the
10429 * last element needs to be moved to where i was
10430 * before the array can safely be truncated.
10433 state->private_objs[i] =
10434 state->private_objs[j];
10436 state->private_objs[j].ptr = NULL;
10437 state->private_objs[j].state = NULL;
10438 state->private_objs[j].old_state = NULL;
10439 state->private_objs[j].new_state = NULL;
10441 state->num_private_objs = j;
10447 /* Store the overall update type for use later in atomic check. */
10448 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10449 struct dm_crtc_state *dm_new_crtc_state =
10450 to_dm_crtc_state(new_crtc_state);
10452 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10457 /* Must be success */
10460 trace_amdgpu_dm_atomic_check_finish(state, ret);
10465 if (ret == -EDEADLK)
10466 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10467 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10468 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10470 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10472 trace_amdgpu_dm_atomic_check_finish(state, ret);
/*
 * is_dp_capable_without_timing_msa() - check if a DP sink can ignore MSA timing
 *
 * Reads DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and tests the
 * DP_MSA_TIMING_PAR_IGNORED bit. A sink that can ignore the MSA timing
 * parameters is a prerequisite for DP variable refresh (FreeSync).
 *
 * NOTE(review): the declaration of dpcd_data and the final return appear to
 * have been elided from this excerpt; capable defaults to false when the link
 * is NULL or the DPCD read fails.
 */
10477 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10478 struct amdgpu_dm_connector *amdgpu_dm_connector)
10481 bool capable = false;
/* Only query when a dc_link exists and the DPCD read succeeds. */
10483 if (amdgpu_dm_connector->dc_link &&
10484 dm_helpers_dp_read_dpcd(
10486 amdgpu_dm_connector->dc_link,
10487 DP_DOWN_STREAM_PORT_COUNT,
10489 sizeof(dpcd_data))) {
10490 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
/*
 * parse_edid_cea() - hand a CEA EDID extension block to DC for VSDB parsing
 * @aconnector: connector whose EDID extension is being parsed
 * @edid_ext:   raw CEA extension block bytes
 * @len:        length of @edid_ext in bytes
 * @vsdb_info:  out-param filled in when an AMD VSDB is found
 *
 * Streams the extension block to the DC EDID parser (DMCU firmware path)
 * 8 bytes at a time via dc_edid_parser_send_cea(), then asks for the parsed
 * AMD Vendor-Specific Data Block result. On success the FreeSync min/max
 * refresh rates and VSDB version are stored into @vsdb_info.
 *
 * NOTE(review): loop-exit/early-return lines are elided in this excerpt;
 * presumably the function returns false when a send or receive step fails.
 */
10496 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10497 uint8_t *edid_ext, int len,
10498 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10501 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10502 struct dc *dc = adev->dm.dc;
10504 /* send extension block to DMCU for parsing */
10505 for (i = 0; i < len; i += 8) {
10509 /* send 8 bytes a time */
10510 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10514 /* EDID block sent completed, expect result */
10515 int version, min_rate, max_rate;
10517 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10519 /* amd vsdb found - record FreeSync capability and refresh range */
10520 vsdb_info->freesync_supported = 1;
10521 vsdb_info->amd_vsdb_version = version;
10522 vsdb_info->min_refresh_rate_hz = min_rate;
10523 vsdb_info->max_refresh_rate_hz = max_rate;
/* No AMD VSDB yet: acknowledge the chunk and continue streaming. */
10531 res = dc_edid_parser_recv_cea_ack(dc, &offset);
/*
 * parse_hdmi_amd_vsdb() - locate the CEA extension in an EDID and parse it
 * @aconnector: connector owning the EDID
 * @edid:       full EDID (base block + extensions)
 * @vsdb_info:  out-param for the parsed AMD VSDB data
 *
 * Open-codes the relevant part of drm_find_cea_extension() to find the first
 * CEA_EXT block, then delegates to parse_edid_cea() for the actual VSDB scan.
 *
 * Return: the CEA extension index (>= 0) when a valid AMD VSDB was found,
 * -ENODEV otherwise.
 */
10539 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10540 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10542 uint8_t *edid_ext = NULL;
10544 bool valid_vsdb_found = false;
10546 /*----- drm_find_cea_extension() -----*/
10547 /* No EDID or EDID extensions */
10548 if (edid == NULL || edid->extensions == 0)
10551 /* Find CEA extension: extension blocks follow the base EDID block,
 * each EDID_LENGTH bytes long; the first byte tags the block type. */
10552 for (i = 0; i < edid->extensions; i++) {
10553 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1)
10554 if (edid_ext[0] == CEA_EXT)
/* Loop fell through without finding a CEA extension block. */
10558 if (i == edid->extensions)
10561 /*----- cea_db_offsets() -----*/
10562 if (edid_ext[0] != CEA_EXT)
10565 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10567 return valid_vsdb_found ? i : -ENODEV;
/*
 * amdgpu_dm_update_freesync_caps() - derive FreeSync/VRR capability from EDID
 * @connector: DRM connector whose capability is being (re)computed
 *
 * Two sink-signal paths are handled:
 *  - DP/eDP: when the sink can ignore MSA timing (is_dp_capable_without_
 *    timing_msa()), scan the EDID detailed-timing descriptors for a monitor
 *    range descriptor and take min/max vfreq from it.
 *  - HDMI: parse the AMD Vendor-Specific Data Block out of the CEA EDID
 *    extension via parse_hdmi_amd_vsdb().
 *
 * In both paths the connector is marked freesync_capable only when the
 * reported refresh range spans more than 10 Hz. The result is stored in the
 * dm connector state and mirrored to the DRM vrr_capable property.
 *
 * NOTE(review): several control-flow lines (returns/gotos between the "no
 * state" check, the reset of min/max vfreq, and the main parsing) are elided
 * in this excerpt; the duplicated dm_con_state assignment suggests an
 * early-out branch between the two — confirm against the full source.
 */
10570 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10574 struct detailed_timing *timing;
10575 struct detailed_non_pixel *data;
10576 struct detailed_data_monitor_range *range;
10577 struct amdgpu_dm_connector *amdgpu_dm_connector =
10578 to_amdgpu_dm_connector(connector);
10579 struct dm_connector_state *dm_con_state = NULL;
10581 struct drm_device *dev = connector->dev;
10582 struct amdgpu_device *adev = drm_to_adev(dev);
10583 bool freesync_capable = false;
10584 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10586 if (!connector->state) {
10587 DRM_ERROR("%s - Connector has no state", __func__);
10592 dm_con_state = to_dm_connector_state(connector->state);
/* Reset previously-cached FreeSync range before re-parsing. */
10594 amdgpu_dm_connector->min_vfreq = 0;
10595 amdgpu_dm_connector->max_vfreq = 0;
10596 amdgpu_dm_connector->pixel_clock_mhz = 0;
10601 dm_con_state = to_dm_connector_state(connector->state);
10603 if (!amdgpu_dm_connector->dc_sink) {
10604 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10607 if (!adev->dm.freesync_module)
/* --- DP / eDP path: use EDID monitor range descriptors --- */
10611 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10612 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10613 bool edid_check_required = false;
10616 edid_check_required = is_dp_capable_without_timing_msa(
10618 amdgpu_dm_connector);
/* Monitor range descriptors require EDID 1.1+ (version/revision check). */
10621 if (edid_check_required == true && (edid->version > 1 ||
10622 (edid->version == 1 && edid->revision > 1))) {
10623 for (i = 0; i < 4; i++) {
10625 timing = &edid->detailed_timings[i];
10626 data = &timing->data.other_data;
10627 range = &data->data.range;
10629 * Check if monitor has continuous frequency mode
10631 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10634 * Check for flag range limits only. If flag == 1 then
10635 * no additional timing information provided.
10636 * Default GTF, GTF Secondary curve and CVT are not
10639 if (range->flags != 1)
10642 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10643 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10644 amdgpu_dm_connector->pixel_clock_mhz =
10645 range->pixel_clock_mhz * 10;
10647 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10648 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
/* Require a usable VRR window: range must exceed 10 Hz. */
10653 if (amdgpu_dm_connector->max_vfreq -
10654 amdgpu_dm_connector->min_vfreq > 10) {
10656 freesync_capable = true;
/* --- HDMI path: parse AMD VSDB from the CEA extension --- */
10659 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10660 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10661 if (i >= 0 && vsdb_info.freesync_supported) {
10662 timing = &edid->detailed_timings[i];
10663 data = &timing->data.other_data;
10665 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10666 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10667 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10668 freesync_capable = true;
10670 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10671 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
/* Publish the result via dm state and the DRM vrr_capable property. */
10677 dm_con_state->freesync_capable = freesync_capable;
10679 if (connector->vrr_capable_property)
10680 drm_connector_set_vrr_capable_property(connector,
/*
 * amdgpu_dm_set_psr_caps() - read Panel Self Refresh capability from DPCD
 * @link: dc_link to probe; only eDP links with an active connection apply
 *
 * Reads DP_PSR_SUPPORT from the sink's DPCD and caches the PSR version in
 * link->dpcd_caps.psr_caps. A zero version means the panel does not support
 * PSR; any non-zero value is treated as PSR version 1 here.
 */
10684 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10686 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
/* PSR is an eDP-only feature; skip other signal types. */
10688 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10690 if (link->type == dc_connection_none)
10692 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10693 dpcd_data, sizeof(dpcd_data))) {
10694 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10696 if (dpcd_data[0] == 0) {
10697 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10698 link->psr_settings.psr_feature_enabled = false;
10700 link->psr_settings.psr_version = DC_PSR_VERSION_1;
10701 link->psr_settings.psr_feature_enabled = true;
10704 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10709 * amdgpu_dm_link_setup_psr() - configure psr link
10710 * @stream: stream state
10712 * Return: true if success
10714 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10716 struct dc_link *link = NULL;
10717 struct psr_config psr_config = {0};
10718 struct psr_context psr_context = {0};
10721 if (stream == NULL)
10724 link = stream->link;
10726 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
/* Only program PSR when the sink advertised a non-zero PSR version. */
10728 if (psr_config.psr_version > 0) {
10729 psr_config.psr_exit_link_training_required = 0x1;
10730 psr_config.psr_frame_capture_indication_req = 0;
/* Fixed RFB setup time and SDP transmit deadline; SMU optimizations off.
 * NOTE(review): these magic values come from DC tuning — confirm against
 * the DC PSR documentation before changing. */
10731 psr_config.psr_rfb_setup_time = 0x37;
10732 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10733 psr_config.allow_smu_optimizations = 0x0;
10735 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10738 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10744 * amdgpu_dm_psr_enable() - enable psr f/w
10745 * @stream: stream state
10747 * Return: true if success
10749 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10751 struct dc_link *link = stream->link;
10752 unsigned int vsync_rate_hz = 0;
10753 struct dc_static_screen_params params = {0};
10754 /* Calculate number of static frames before generating interrupt to
10757 // Init fail safe of 2 frames static
10758 unsigned int num_frames_static = 2;
10760 DRM_DEBUG_DRIVER("Enabling psr...\n");
/* vsync rate = pixel clock / (v_total * h_total); pix_clk is in 100 Hz
 * units, hence the * 100. div64_u64 avoids 64-bit division issues. */
10762 vsync_rate_hz = div64_u64(div64_u64((
10763 stream->timing.pix_clk_100hz * 100),
10764 stream->timing.v_total),
10765 stream->timing.h_total);
10768 * Calculate number of frames such that at least 30 ms of time has
10771 if (vsync_rate_hz != 0) {
10772 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10773 num_frames_static = (30000 / frame_time_microsec) + 1;
/* Any cursor, overlay or surface update counts as screen activity
 * and re-arms the static-screen frame counter. */
10776 params.triggers.cursor_update = true;
10777 params.triggers.overlay_update = true;
10778 params.triggers.surface_update = true;
10779 params.num_frames = num_frames_static;
10781 dc_stream_set_static_screen_params(link->ctx->dc,
10785 return dc_link_set_psr_allow_active(link, true, false, false);
10789 * amdgpu_dm_psr_disable() - disable psr f/w on a single stream's link
10790 * @stream: stream state
10792 * Return: true if success
10794 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10797 DRM_DEBUG_DRIVER("Disabling psr...\n");
/* wait=true: block until the PSR state machine has actually exited. */
10799 return dc_link_set_psr_allow_active(stream->link, false, true, false);
10803 * amdgpu_dm_psr_disable_all() - disable psr f/w
10804 * if psr is enabled on any stream
10806 * Return: true if success
10808 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10810 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
/* Device-wide disable: dc_set_psr_allow_active() covers every link,
 * unlike amdgpu_dm_psr_disable() which targets one stream's link. */
10811 return dc_set_psr_allow_active(dm->dc, false);
/*
 * amdgpu_dm_trigger_timing_sync() - force CRTC timing resynchronization
 * @dev: DRM device
 *
 * Under the DC lock, propagates the force_timing_sync debug flag to every
 * stream in the current DC state, re-enables per-frame CRTC master sync and
 * triggers the sync in DC. Used to align scanout timing across displays.
 */
10814 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10816 struct amdgpu_device *adev = drm_to_adev(dev);
10817 struct dc *dc = adev->dm.dc;
/* dc_lock serializes against concurrent commits touching current_state. */
10820 mutex_lock(&adev->dm.dc_lock);
10821 if (dc->current_state) {
10822 for (i = 0; i < dc->current_state->stream_count; ++i)
10823 dc->current_state->streams[i]
10824 ->triggered_crtc_reset.enabled =
10825 adev->dm.force_timing_sync;
10827 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10828 dc_trigger_sync(dc, dc->current_state);
10830 mutex_unlock(&adev->dm.dc_lock);
/*
 * dm_write_reg_func() - DC register-write hook with tracing
 * @ctx:       DC context providing the CGS device handle
 * @address:   MMIO register offset to write
 * @value:     value to write
 * @func_name: caller name, used by the trace/debug machinery
 *
 * Writes the register through CGS and records the access in the
 * amdgpu_dc_wreg tracepoint. With DM_CHECK_ADDR_0 defined, a write to
 * offset 0 is rejected as an obvious programming error.
 */
10833 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10834 uint32_t value, const char *func_name)
10836 #ifdef DM_CHECK_ADDR_0
10837 if (address == 0) {
10838 DC_ERR("invalid register write. address = 0");
10842 cgs_write_register(ctx->cgs_device, address, value);
10843 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
/*
 * dm_read_reg_func() - DC register-read hook with tracing
 * @ctx:       DC context providing the CGS device handle
 * @address:   MMIO register offset to read
 * @func_name: caller name, used by the trace/debug machinery
 *
 * Return: the register value read through CGS.
 *
 * With DM_CHECK_ADDR_0 defined, a read of offset 0 is rejected. While DMUB
 * register-offload gathering is in progress (and not burst-writing), the
 * read is special-cased — the elided branch body presumably returns a value
 * from the offload path rather than touching hardware; confirm against the
 * full source.
 */
10846 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10847 const char *func_name)
10850 #ifdef DM_CHECK_ADDR_0
10851 if (address == 0) {
10852 DC_ERR("invalid register read; address = 0\n");
10857 if (ctx->dmub_srv &&
10858 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10859 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10864 value = cgs_read_register(ctx->cgs_device, address);
10866 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10871 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10872 struct aux_payload *payload, enum aux_return_code_type *operation_result)
10874 struct amdgpu_device *adev = ctx->driver_context;
10877 dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
10878 ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
10880 *operation_result = AUX_RET_ERROR_TIMEOUT;
10883 *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10885 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10886 (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10888 // For read case, Copy data to payload
10889 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
10890 (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
10891 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
10892 adev->dm.dmub_notify->aux_reply.length);
10895 return adev->dm.dmub_notify->aux_reply.length;