2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
49 #include "amdgpu_pm.h"
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
59 #include "ivsrcid/ivsrcid_vislands30.h"
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
123 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125 * requests into DC requests, and DC responses into DRM responses.
127 * The root control structure is &struct amdgpu_display_manager.
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
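/* Map the dongle type from the link's DPCD caps to the DRM subconnector
 * type reported to userspace.
 */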
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 switch (link->dpcd_caps.dongle_type) {
137 case DISPLAY_DONGLE_NONE:
138 return DRM_MODE_SUBCONNECTOR_Native;
139 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 return DRM_MODE_SUBCONNECTOR_VGA;
141 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 return DRM_MODE_SUBCONNECTOR_DVID;
144 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 return DRM_MODE_SUBCONNECTOR_HDMIA;
147 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 return DRM_MODE_SUBCONNECTOR_Unknown;
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 struct dc_link *link = aconnector->dc_link;
156 struct drm_connector *connector = &aconnector->base;
157 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
162 if (aconnector->dc_sink)
163 subconnector = get_subconnector_type(link);
165 drm_object_property_set_value(&connector->base,
166 connector->dev->mode_config.dp_subconnector_property,
171 * initializes drm_device display related structures, based on the information
172 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
173 * drm_encoder, drm_mode_config
175 * Returns 0 on success
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 struct drm_plane *plane,
183 unsigned long possible_crtcs,
184 const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 struct drm_plane *plane,
187 uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 struct amdgpu_dm_connector *amdgpu_dm_connector,
191 struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 struct amdgpu_encoder *aencoder,
194 uint32_t link_index);
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 struct drm_atomic_state *state);
203 static void handle_cursor_update(struct drm_plane *plane,
204 struct drm_plane_state *old_plane_state);
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
216 * dm_vblank_get_counter
219 * Get counter for number of vertical blanks
222 * struct amdgpu_device *adev - [in] desired amdgpu device
223 * int crtc - [in] which CRTC to get the counter from
226 * Counter for vertical blanks
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
230 if (crtc >= adev->mode_info.num_crtc)
233 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
235 if (acrtc->dm_irq_params.stream == NULL) {
236 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
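/* Report the current scanout position of a CRTC, packed back into the
 * legacy register layout expected by the base driver.
 */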
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 u32 *vbl, u32 *position)
248 uint32_t v_blank_start, v_blank_end, h_position, v_position;
250 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
253 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
255 if (acrtc->dm_irq_params.stream == NULL) {
256 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
262 * TODO rework base driver to use values directly.
263 * for now parse it back into reg-format
265 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
271 *position = v_position | (h_position << 16);
272 *vbl = v_blank_start | (v_blank_end << 16);
278 static bool dm_is_idle(void *handle)
284 static int dm_wait_for_idle(void *handle)
290 static bool dm_check_soft_reset(void *handle)
295 static int dm_soft_reset(void *handle)
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
305 struct drm_device *dev = adev_to_drm(adev);
306 struct drm_crtc *crtc;
307 struct amdgpu_crtc *amdgpu_crtc;
309 if (otg_inst == -1) {
311 return adev->mode_info.crtcs[0];
314 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 amdgpu_crtc = to_amdgpu_crtc(crtc);
317 if (amdgpu_crtc->otg_inst == otg_inst)
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
326 return acrtc->dm_irq_params.freesync_config.state ==
327 VRR_STATE_ACTIVE_VARIABLE ||
328 acrtc->dm_irq_params.freesync_config.state ==
329 VRR_STATE_ACTIVE_FIXED;
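/* Check whether variable refresh rate is currently active for a CRTC state. */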
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
334 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
339 * dm_pflip_high_irq() - Handle pageflip interrupt
340 * @interrupt_params: ignored
342 * Handles the pageflip interrupt by notifying all interested parties
343 * that the pageflip has been completed.
345 static void dm_pflip_high_irq(void *interrupt_params)
347 struct amdgpu_crtc *amdgpu_crtc;
348 struct common_irq_params *irq_params = interrupt_params;
349 struct amdgpu_device *adev = irq_params->adev;
351 struct drm_pending_vblank_event *e;
352 uint32_t vpos, hpos, v_blank_start, v_blank_end;
355 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
357 /* IRQ could occur when in initial stage */
358 /* TODO work and BO cleanup */
359 if (amdgpu_crtc == NULL) {
360 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
364 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
366 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
367 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
368 amdgpu_crtc->pflip_status,
369 AMDGPU_FLIP_SUBMITTED,
370 amdgpu_crtc->crtc_id,
372 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
376 /* page flip completed. */
377 e = amdgpu_crtc->event;
378 amdgpu_crtc->event = NULL;
383 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
385 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
387 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 &v_blank_end, &hpos, &vpos) ||
389 (vpos < v_blank_start)) {
390 /* Update to correct count and vblank timestamp if racing with
391 * vblank irq. This also updates to the correct vblank timestamp
392 * even in VRR mode, as scanout is past the front-porch atm.
394 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
396 /* Wake up userspace by sending the pageflip event with proper
397 * count and timestamp of vblank of flip completion.
400 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
402 /* Event sent, so done with vblank for this flip */
403 drm_crtc_vblank_put(&amdgpu_crtc->base);
406 /* VRR active and inside front-porch: vblank count and
407 * timestamp for pageflip event will only be up to date after
408 * drm_crtc_handle_vblank() has been executed from late vblank
409 * irq handler after start of back-porch (vline 0). We queue the
410 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 * updated timestamp and count, once it runs after us.
413 * We need to open-code this instead of using the helper
414 * drm_crtc_arm_vblank_event(), as that helper would
415 * call drm_crtc_accurate_vblank_count(), which we must
416 * not call in VRR mode while we are in front-porch!
419 /* sequence will be replaced by real count during send-out. */
420 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 e->pipe = amdgpu_crtc->crtc_id;
423 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
427 /* Keep track of vblank of this flip for flip throttling. We use the
428 * cooked hw counter, as that one incremented at start of this vblank
429 * of pageflip completion, so last_flip_vblank is the forbidden count
430 * for queueing new pageflips if vsync + VRR is enabled.
432 amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
435 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
438 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 vrr_active, (int) !e);
443 static void dm_vupdate_high_irq(void *interrupt_params)
445 struct common_irq_params *irq_params = interrupt_params;
446 struct amdgpu_device *adev = irq_params->adev;
447 struct amdgpu_crtc *acrtc;
451 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
454 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
456 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
460 /* Core vblank handling is done here after the end of front-porch in
461 * vrr mode, as vblank timestamping gives valid results only once
462 * scanout is past the front-porch. This will also deliver
463 * page-flip completion events that have been queued to us
464 * if a pageflip happened inside front-porch.
467 drm_crtc_handle_vblank(&acrtc->base);
469 /* BTR processing for pre-DCE12 ASICs */
470 if (acrtc->dm_irq_params.stream &&
471 adev->family < AMDGPU_FAMILY_AI) {
472 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 mod_freesync_handle_v_update(
474 adev->dm.freesync_module,
475 acrtc->dm_irq_params.stream,
476 &acrtc->dm_irq_params.vrr_params);
478 dc_stream_adjust_vmin_vmax(
480 acrtc->dm_irq_params.stream,
481 &acrtc->dm_irq_params.vrr_params.adjust);
482 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
489 * dm_crtc_high_irq() - Handles CRTC interrupt
490 * @interrupt_params: used for determining the CRTC instance
492 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
495 static void dm_crtc_high_irq(void *interrupt_params)
497 struct common_irq_params *irq_params = interrupt_params;
498 struct amdgpu_device *adev = irq_params->adev;
499 struct amdgpu_crtc *acrtc;
503 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
507 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
509 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 vrr_active, acrtc->dm_irq_params.active_planes);
513 * Core vblank handling at the start of front-porch is only possible
514 * in non-vrr mode, as only then does vblank timestamping give
515 * valid results while inside front-porch. Otherwise defer it
516 * to dm_vupdate_high_irq after end of front-porch.
519 drm_crtc_handle_vblank(&acrtc->base);
522 * The following must happen at the start of vblank, for crc
523 * computation and below-the-range btr support in vrr mode.
525 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
527 /* BTR updates need to happen before VUPDATE on Vega and above. */
528 if (adev->family < AMDGPU_FAMILY_AI)
531 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
533 if (acrtc->dm_irq_params.stream &&
534 acrtc->dm_irq_params.vrr_params.supported &&
535 acrtc->dm_irq_params.freesync_config.state ==
536 VRR_STATE_ACTIVE_VARIABLE) {
537 mod_freesync_handle_v_update(adev->dm.freesync_module,
538 acrtc->dm_irq_params.stream,
539 &acrtc->dm_irq_params.vrr_params);
541 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 &acrtc->dm_irq_params.vrr_params.adjust);
546 * If there aren't any active_planes then DCH HUBP may be clock-gated.
547 * In that case, pageflip completion interrupts won't fire and pageflip
548 * completion events won't get delivered. Prevent this by sending
549 * pending pageflip events from here if a flip is still pending.
551 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 * avoid race conditions between flip programming and completion,
553 * which could cause too early flip completion events.
555 if (adev->family >= AMDGPU_FAMILY_RV &&
556 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 acrtc->dm_irq_params.active_planes == 0) {
559 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
561 drm_crtc_vblank_put(&acrtc->base);
563 acrtc->pflip_status = AMDGPU_FLIP_NONE;
566 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
569 static int dm_set_clockgating_state(void *handle,
570 enum amd_clockgating_state state)
575 static int dm_set_powergating_state(void *handle,
576 enum amd_powergating_state state)
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
584 /* Allocate memory for FBC compressed data */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
587 struct drm_device *dev = connector->dev;
588 struct amdgpu_device *adev = drm_to_adev(dev);
589 struct dm_compressor_info *compressor = &adev->dm.compressor;
590 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 struct drm_display_mode *mode;
592 unsigned long max_size = 0;
594 if (adev->dm.dc->fbc_compressor == NULL)
597 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
600 if (compressor->bo_ptr)
604 list_for_each_entry(mode, &connector->modes, head) {
605 if (max_size < mode->htotal * mode->vtotal)
606 max_size = mode->htotal * mode->vtotal;
610 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612 &compressor->gpu_addr, &compressor->cpu_addr);
615 DRM_ERROR("DM: Failed to initialize FBC\n");
617 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618 DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
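/* drm_audio_component callback: find the connector mapped to the given
 * audio pin and copy its ELD into the caller's buffer.
 */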
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 int pipe, bool *enabled,
627 unsigned char *buf, int max_bytes)
629 struct drm_device *dev = dev_get_drvdata(kdev);
630 struct amdgpu_device *adev = drm_to_adev(dev);
631 struct drm_connector *connector;
632 struct drm_connector_list_iter conn_iter;
633 struct amdgpu_dm_connector *aconnector;
638 mutex_lock(&adev->dm.audio_lock);
640 drm_connector_list_iter_begin(dev, &conn_iter);
641 drm_for_each_connector_iter(connector, &conn_iter) {
642 aconnector = to_amdgpu_dm_connector(connector);
643 if (aconnector->audio_inst != port)
647 ret = drm_eld_size(connector->eld);
648 memcpy(buf, connector->eld, min(max_bytes, ret));
652 drm_connector_list_iter_end(&conn_iter);
654 mutex_unlock(&adev->dm.audio_lock);
656 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 .get_eld = amdgpu_dm_audio_component_get_eld,
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 struct device *hda_kdev, void *data)
668 struct drm_device *dev = dev_get_drvdata(kdev);
669 struct amdgpu_device *adev = drm_to_adev(dev);
670 struct drm_audio_component *acomp = data;
672 acomp->ops = &amdgpu_dm_audio_component_ops;
674 adev->dm.audio_component = acomp;
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 struct device *hda_kdev, void *data)
682 struct drm_device *dev = dev_get_drvdata(kdev);
683 struct amdgpu_device *adev = drm_to_adev(dev);
684 struct drm_audio_component *acomp = data;
688 adev->dm.audio_component = NULL;
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 .bind = amdgpu_dm_audio_component_bind,
693 .unbind = amdgpu_dm_audio_component_unbind,
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
703 adev->mode_info.audio.enabled = true;
705 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
707 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 adev->mode_info.audio.pin[i].channels = -1;
709 adev->mode_info.audio.pin[i].rate = -1;
710 adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 adev->mode_info.audio.pin[i].status_bits = 0;
712 adev->mode_info.audio.pin[i].category_code = 0;
713 adev->mode_info.audio.pin[i].connected = false;
714 adev->mode_info.audio.pin[i].id =
715 adev->dm.dc->res_pool->audios[i]->inst;
716 adev->mode_info.audio.pin[i].offset = 0;
719 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
723 adev->dm.audio_registered = true;
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 if (!adev->mode_info.audio.enabled)
736 if (adev->dm.audio_registered) {
737 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 adev->dm.audio_registered = false;
741 /* TODO: Disable audio? */
743 adev->mode_info.audio.enabled = false;
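/* Notify the bound audio component that the ELD on the given pin changed. */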
746 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
748 struct drm_audio_component *acomp = adev->dm.audio_component;
750 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
753 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
760 const struct dmcub_firmware_header_v1_0 *hdr;
761 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 struct abm *abm = adev->dm.dc->res_pool->abm;
766 struct dmub_srv_hw_params hw_params;
767 enum dmub_status status;
768 const unsigned char *fw_inst_const, *fw_bss_data;
769 uint32_t i, fw_inst_const_size, fw_bss_data_size;
773 /* DMUB isn't supported on the ASIC. */
777 DRM_ERROR("No framebuffer info for DMUB service.\n");
782 /* Firmware required for DMUB support. */
783 DRM_ERROR("No firmware provided for DMUB.\n");
787 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 if (status != DMUB_STATUS_OK) {
789 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
793 if (!has_hw_support) {
794 DRM_INFO("DMUB unsupported on ASIC\n");
798 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
800 fw_inst_const = dmub_fw->data +
801 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
804 fw_bss_data = dmub_fw->data +
805 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 le32_to_cpu(hdr->inst_const_bytes);
808 /* Copy firmware and bios info into FB memory. */
809 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
812 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
814 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 * amdgpu_ucode_init_single_fw will load dmub firmware
816 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 * will be done by dm_dmub_hw_init
819 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
824 if (fw_bss_data_size)
825 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 fw_bss_data, fw_bss_data_size);
828 /* Copy firmware bios info into FB memory. */
829 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
832 /* Reset regions that need to be reset. */
833 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
834 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
836 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
839 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
842 /* Initialize hardware. */
843 memset(&hw_params, 0, sizeof(hw_params));
844 hw_params.fb_base = adev->gmc.fb_start;
845 hw_params.fb_offset = adev->gmc.aper_base;
847 /* backdoor load firmware and trigger dmub running */
848 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 hw_params.load_inst_const = true;
852 hw_params.psp_version = dmcu->psp_version;
854 for (i = 0; i < fb_info->num_fb; ++i)
855 hw_params.fb[i] = &fb_info->fb[i];
857 status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 if (status != DMUB_STATUS_OK) {
859 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
863 /* Wait for firmware load to finish. */
864 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 if (status != DMUB_STATUS_OK)
866 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
868 /* Init DMCU and ABM if available. */
870 dmcu->funcs->dmcu_init(dmcu);
871 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
874 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 if (!adev->dm.dc->ctx->dmub_srv) {
876 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
880 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 adev->dm.dmcub_fw_version);
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
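/* Translate the GMC framebuffer, AGP and GART apertures into the DC
 * physical address space configuration used by DCN hardware.
 */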
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
890 uint32_t logical_addr_low;
891 uint32_t logical_addr_high;
892 uint32_t agp_base, agp_bot, agp_top;
893 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
895 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
896 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
898 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
900 * Raven2 has a HW issue where it is unable to use the vram which
901 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is to
902 * increase the system aperture high address (add 1) to get rid of
903 * the VM fault and hardware hang.
905 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
907 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
910 agp_bot = adev->gmc.agp_start >> 24;
911 agp_top = adev->gmc.agp_end >> 24;
914 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 page_table_base.low_part = lower_32_bits(pt_base);
921 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
924 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
925 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
928 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
932 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
936 pa_config->is_hvm_enabled = 0;
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
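/* Deferred vblank work: track how many CRTCs have vblank interrupts enabled
 * and allow DC idle (MALL) optimizations only when none are active.
 */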
941 static void event_mall_stutter(struct work_struct *work)
944 struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
945 struct amdgpu_display_manager *dm = vblank_work->dm;
947 mutex_lock(&dm->dc_lock);
949 if (vblank_work->enable)
950 dm->active_vblank_irq_count++;
952 dm->active_vblank_irq_count--;
955 dc_allow_idle_optimizations(
956 dm->dc, dm->active_vblank_irq_count == 0);
958 DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
961 mutex_unlock(&dm->dc_lock);
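/* Allocate one vblank work item per DC link and hook each up to the MALL
 * stutter handler above.
 */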
964 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
967 int max_caps = dc->caps.max_links;
968 struct vblank_workqueue *vblank_work;
971 vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
972 if (ZERO_OR_NULL_PTR(vblank_work)) {
977 for (i = 0; i < max_caps; i++)
978 INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
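/* Bring up the display manager: initialize locks and IRQs, create the DC
 * core, DMUB, freesync and HDCP modules, then create the DRM display
 * structures and vblank support.
 */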
983 static int amdgpu_dm_init(struct amdgpu_device *adev)
985 struct dc_init_data init_data;
986 #ifdef CONFIG_DRM_AMD_DC_HDCP
987 struct dc_callback_init init_params;
991 adev->dm.ddev = adev_to_drm(adev);
992 adev->dm.adev = adev;
994 /* Zero all the fields */
995 memset(&init_data, 0, sizeof(init_data));
996 #ifdef CONFIG_DRM_AMD_DC_HDCP
997 memset(&init_params, 0, sizeof(init_params));
1000 mutex_init(&adev->dm.dc_lock);
1001 mutex_init(&adev->dm.audio_lock);
1002 #if defined(CONFIG_DRM_AMD_DC_DCN)
1003 spin_lock_init(&adev->dm.vblank_lock);
1006 if (amdgpu_dm_irq_init(adev)) {
1007 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1011 init_data.asic_id.chip_family = adev->family;
1013 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1014 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1016 init_data.asic_id.vram_width = adev->gmc.vram_width;
1017 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1018 init_data.asic_id.atombios_base_address =
1019 adev->mode_info.atom_context->bios;
1021 init_data.driver = adev;
1023 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1025 if (!adev->dm.cgs_device) {
1026 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1030 init_data.cgs_device = adev->dm.cgs_device;
1032 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1034 switch (adev->asic_type) {
1039 init_data.flags.gpu_vm_support = true;
1040 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1041 init_data.flags.disable_dmcu = true;
1043 #if defined(CONFIG_DRM_AMD_DC_DCN)
1045 init_data.flags.gpu_vm_support = true;
1052 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1053 init_data.flags.fbc_support = true;
1055 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1056 init_data.flags.multi_mon_pp_mclk_switch = true;
1058 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1059 init_data.flags.disable_fractional_pwm = true;
1061 init_data.flags.power_down_display_on_boot = true;
1063 /* Display Core create. */
1064 adev->dm.dc = dc_create(&init_data);
1067 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1069 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1073 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1074 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1075 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1078 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1079 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1081 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1082 adev->dm.dc->debug.disable_stutter = true;
1084 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1085 adev->dm.dc->debug.disable_dsc = true;
1087 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1088 adev->dm.dc->debug.disable_clock_gate = true;
1090 r = dm_dmub_hw_init(adev);
1092 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1096 dc_hardware_init(adev->dm.dc);
1098 #if defined(CONFIG_DRM_AMD_DC_DCN)
1099 if (adev->apu_flags) {
1100 struct dc_phy_addr_space_config pa_config;
1102 mmhub_read_system_context(adev, &pa_config);
1104 // Call the DC init_memory func
1105 dc_setup_system_context(adev->dm.dc, &pa_config);
1109 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1110 if (!adev->dm.freesync_module) {
1112 "amdgpu: failed to initialize freesync_module.\n");
1114 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1115 adev->dm.freesync_module);
1117 amdgpu_dm_init_color_mod();
1119 #if defined(CONFIG_DRM_AMD_DC_DCN)
1120 if (adev->dm.dc->caps.max_links > 0) {
1121 adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1123 if (!adev->dm.vblank_workqueue)
1124 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1126 DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1130 #ifdef CONFIG_DRM_AMD_DC_HDCP
1131 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1132 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1134 if (!adev->dm.hdcp_workqueue)
1135 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1137 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1139 dc_init_callbacks(adev->dm.dc, &init_params);
1142 if (amdgpu_dm_initialize_drm_device(adev)) {
1144 "amdgpu: failed to initialize sw for display support.\n");
1148 /* create fake encoders for MST */
1149 dm_dp_create_fake_mst_encoders(adev);
1151 /* TODO: Add_display_info? */
1153 /* TODO use dynamic cursor width */
1154 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1155 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1157 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1159 "amdgpu: failed to initialize sw for display support.\n");
1164 DRM_DEBUG_DRIVER("KMS initialized.\n");
1168 amdgpu_dm_fini(adev);
1173 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1177 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1178 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1181 amdgpu_dm_audio_fini(adev);
1183 amdgpu_dm_destroy_drm_device(&adev->dm);
1185 #ifdef CONFIG_DRM_AMD_DC_HDCP
1186 if (adev->dm.hdcp_workqueue) {
1187 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1188 adev->dm.hdcp_workqueue = NULL;
1192 dc_deinit_callbacks(adev->dm.dc);
1194 if (adev->dm.dc->ctx->dmub_srv) {
1195 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1196 adev->dm.dc->ctx->dmub_srv = NULL;
1199 if (adev->dm.dmub_bo)
1200 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1201 &adev->dm.dmub_bo_gpu_addr,
1202 &adev->dm.dmub_bo_cpu_addr);
1204 /* DC Destroy TODO: Replace destroy DAL */
1206 dc_destroy(&adev->dm.dc);
1208 * TODO: pageflip, vblank interrupt
1210 * amdgpu_dm_irq_fini(adev);
1213 if (adev->dm.cgs_device) {
1214 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1215 adev->dm.cgs_device = NULL;
1217 if (adev->dm.freesync_module) {
1218 mod_freesync_destroy(adev->dm.freesync_module);
1219 adev->dm.freesync_module = NULL;
1222 mutex_destroy(&adev->dm.audio_lock);
1223 mutex_destroy(&adev->dm.dc_lock);
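/* Pick the DMCU firmware for the current ASIC, validate it, and register it
 * with the PSP firmware loader when PSP loading is in use.
 */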
1228 static int load_dmcu_fw(struct amdgpu_device *adev)
1230 const char *fw_name_dmcu = NULL;
1232 const struct dmcu_firmware_header_v1_0 *hdr;
1234 switch (adev->asic_type) {
1235 #if defined(CONFIG_DRM_AMD_DC_SI)
1250 case CHIP_POLARIS11:
1251 case CHIP_POLARIS10:
1252 case CHIP_POLARIS12:
1260 case CHIP_SIENNA_CICHLID:
1261 case CHIP_NAVY_FLOUNDER:
1262 case CHIP_DIMGREY_CAVEFISH:
1266 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1269 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1270 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1271 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1272 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1277 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1281 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1282 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1286 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1288 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1289 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1290 adev->dm.fw_dmcu = NULL;
1294 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1299 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1301 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1303 release_firmware(adev->dm.fw_dmcu);
1304 adev->dm.fw_dmcu = NULL;
1308 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1309 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1310 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1311 adev->firmware.fw_size +=
1312 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1314 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1315 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1316 adev->firmware.fw_size +=
1317 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1319 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1321 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1326 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1328 struct amdgpu_device *adev = ctx;
1330 return dm_read_reg(adev->dm.dc->ctx, address);
1333 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1336 struct amdgpu_device *adev = ctx;
1338 return dm_write_reg(adev->dm.dc->ctx, address, value);
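/* Software init for DMUB: fetch the per-ASIC firmware, create the DMUB
 * service, size its memory regions and allocate the backing framebuffer.
 */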
1341 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1343 struct dmub_srv_create_params create_params;
1344 struct dmub_srv_region_params region_params;
1345 struct dmub_srv_region_info region_info;
1346 struct dmub_srv_fb_params fb_params;
1347 struct dmub_srv_fb_info *fb_info;
1348 struct dmub_srv *dmub_srv;
1349 const struct dmcub_firmware_header_v1_0 *hdr;
1350 const char *fw_name_dmub;
1351 enum dmub_asic dmub_asic;
1352 enum dmub_status status;
1355 switch (adev->asic_type) {
1357 dmub_asic = DMUB_ASIC_DCN21;
1358 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1359 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1360 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1362 case CHIP_SIENNA_CICHLID:
1363 dmub_asic = DMUB_ASIC_DCN30;
1364 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1366 case CHIP_NAVY_FLOUNDER:
1367 dmub_asic = DMUB_ASIC_DCN30;
1368 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1371 dmub_asic = DMUB_ASIC_DCN301;
1372 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1374 case CHIP_DIMGREY_CAVEFISH:
1375 dmub_asic = DMUB_ASIC_DCN302;
1376 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1380 /* ASIC doesn't support DMUB. */
1384 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1386 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1390 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1392 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1396 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1398 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1399 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1400 AMDGPU_UCODE_ID_DMCUB;
1401 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1403 adev->firmware.fw_size +=
1404 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1406 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1407 adev->dm.dmcub_fw_version);
1410 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1412 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1413 dmub_srv = adev->dm.dmub_srv;
1416 DRM_ERROR("Failed to allocate DMUB service!\n");
1420 memset(&create_params, 0, sizeof(create_params));
1421 create_params.user_ctx = adev;
1422 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1423 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1424 create_params.asic = dmub_asic;
1426 /* Create the DMUB service. */
1427 status = dmub_srv_create(dmub_srv, &create_params);
1428 if (status != DMUB_STATUS_OK) {
1429 DRM_ERROR("Error creating DMUB service: %d\n", status);
1433 /* Calculate the size of all the regions for the DMUB service. */
1434 memset(&region_params, 0, sizeof(region_params));
1436 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1437 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1438 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1439 region_params.vbios_size = adev->bios_size;
1440 region_params.fw_bss_data = region_params.bss_data_size ?
1441 adev->dm.dmub_fw->data +
1442 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1443 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1444 region_params.fw_inst_const =
1445 adev->dm.dmub_fw->data +
1446 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1449 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1452 if (status != DMUB_STATUS_OK) {
1453 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1458 * Allocate a framebuffer based on the total size of all the regions.
1459 * TODO: Move this into GART.
1461 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1462 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1463 &adev->dm.dmub_bo_gpu_addr,
1464 &adev->dm.dmub_bo_cpu_addr);
1468 /* Rebase the regions on the framebuffer address. */
1469 memset(&fb_params, 0, sizeof(fb_params));
1470 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1471 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1472 fb_params.region_info = &region_info;
1474 adev->dm.dmub_fb_info =
1475 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1476 fb_info = adev->dm.dmub_fb_info;
1480 "Failed to allocate framebuffer info for DMUB service!\n");
1484 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1485 if (status != DMUB_STATUS_OK) {
1486 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1493 static int dm_sw_init(void *handle)
1495 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1498 r = dm_dmub_sw_init(adev);
1502 return load_dmcu_fw(adev);
1505 static int dm_sw_fini(void *handle)
1507 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1509 kfree(adev->dm.dmub_fb_info);
1510 adev->dm.dmub_fb_info = NULL;
1512 if (adev->dm.dmub_srv) {
1513 dmub_srv_destroy(adev->dm.dmub_srv);
1514 adev->dm.dmub_srv = NULL;
1517 release_firmware(adev->dm.dmub_fw);
1518 adev->dm.dmub_fw = NULL;
1520 release_firmware(adev->dm.fw_dmcu);
1521 adev->dm.fw_dmcu = NULL;
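/* Start MST topology management on every DP MST branch link that has an
 * AUX channel.
 */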
1526 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1528 struct amdgpu_dm_connector *aconnector;
1529 struct drm_connector *connector;
1530 struct drm_connector_list_iter iter;
1533 drm_connector_list_iter_begin(dev, &iter);
1534 drm_for_each_connector_iter(connector, &iter) {
1535 aconnector = to_amdgpu_dm_connector(connector);
1536 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1537 aconnector->mst_mgr.aux) {
1538 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1540 aconnector->base.base.id);
1542 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1544 DRM_ERROR("DM_MST: Failed to start MST\n");
1545 aconnector->dc_link->type =
1546 dc_connection_single;
1551 drm_connector_list_iter_end(&iter);
1556 static int dm_late_init(void *handle)
1558 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1560 struct dmcu_iram_parameters params;
1561 unsigned int linear_lut[16];
1563 struct dmcu *dmcu = NULL;
1566 dmcu = adev->dm.dc->res_pool->dmcu;
1568 for (i = 0; i < 16; i++)
1569 linear_lut[i] = 0xFFFF * i / 15;
1572 params.backlight_ramping_start = 0xCCCC;
1573 params.backlight_ramping_reduction = 0xCCCCCCCC;
1574 params.backlight_lut_array_size = 16;
1575 params.backlight_lut_array = linear_lut;
1577 /* Min backlight level after ABM reduction; don't allow below 1%
1578 * 0xFFFF x 0.01 = 0x28F
1580 params.min_abm_backlight = 0x28F;
1582 /* In the case where abm is implemented on dmcub,
1583 * the dmcu object will be null.
1584 * ABM 2.4 and up are implemented on dmcub.
1587 ret = dmcu_load_iram(dmcu, params);
1588 else if (adev->dm.dc->ctx->dmub_srv)
1589 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1594 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
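/* Suspend or resume MST topology managers around S3; fall back to non-MST
 * and request a hotplug event if a topology cannot be resumed.
 */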
1597 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1599 struct amdgpu_dm_connector *aconnector;
1600 struct drm_connector *connector;
1601 struct drm_connector_list_iter iter;
1602 struct drm_dp_mst_topology_mgr *mgr;
1604 bool need_hotplug = false;
1606 drm_connector_list_iter_begin(dev, &iter);
1607 drm_for_each_connector_iter(connector, &iter) {
1608 aconnector = to_amdgpu_dm_connector(connector);
1609 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1610 aconnector->mst_port)
1613 mgr = &aconnector->mst_mgr;
1616 drm_dp_mst_topology_mgr_suspend(mgr);
1618 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1620 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1621 need_hotplug = true;
1625 drm_connector_list_iter_end(&iter);
1628 drm_kms_helper_hotplug_event(dev);
1631 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1633 struct smu_context *smu = &adev->smu;
1636 if (!is_support_sw_smu(adev))
1639 * This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1640 * on the Windows driver dc implementation.
1641 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1642 * should be passed to smu during boot up and resume from s3.
1643 * boot up: dc calculates dcn watermark clock settings within dc_create,
1644 * dcn20_resource_construct
1645 * then call pplib functions below to pass the settings to smu:
1646 * smu_set_watermarks_for_clock_ranges
1647 * smu_set_watermarks_table
1648 * navi10_set_watermarks_table
1649 * smu_write_watermarks_table
1651 * For Renoir, clock settings of dcn watermarks are also fixed values.
1652 * dc has implemented a different flow for the Windows driver:
1653 * dc_hardware_init / dc_set_power_state
1658 * smu_set_watermarks_for_clock_ranges
1659 * renoir_set_watermarks_table
1660 * smu_write_watermarks_table
1663 * dc_hardware_init -> amdgpu_dm_init
1664 * dc_set_power_state --> dm_resume
1666 * therefore, this function applies to navi10/12/14 but not Renoir
1669 switch (adev->asic_type) {
1678 ret = smu_write_watermarks_table(smu);
1680 DRM_ERROR("Failed to update WMTABLE!\n");
1688 * dm_hw_init() - Initialize DC device
1689 * @handle: The base driver device containing the amdgpu_dm device.
1691 * Initialize the &struct amdgpu_display_manager device. This involves calling
1692 * the initializers of each DM component, then populating the struct with them.
1694 * Although the function implies hardware initialization, both hardware and
1695 * software are initialized here. Splitting them out to their relevant init
1696 * hooks is a future TODO item.
1698 * Some notable things that are initialized here:
1700 * - Display Core, both software and hardware
1701 * - DC modules that we need (freesync and color management)
1702 * - DRM software states
1703 * - Interrupt sources and handlers
1705 * - Debug FS entries, if enabled
1707 static int dm_hw_init(void *handle)
1709 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710 /* Create DAL display manager */
1711 amdgpu_dm_init(adev);
1712 amdgpu_dm_hpd_init(adev);
1718 * dm_hw_fini() - Teardown DC device
1719 * @handle: The base driver device containing the amdgpu_dm device.
1721 * Teardown components within &struct amdgpu_display_manager that require
1722 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1723 * were loaded. Also flush IRQ workqueues and disable them.
1725 static int dm_hw_fini(void *handle)
1727 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1729 amdgpu_dm_hpd_fini(adev);
1731 amdgpu_dm_irq_fini(adev);
1732 amdgpu_dm_fini(adev);
1737 static int dm_enable_vblank(struct drm_crtc *crtc);
1738 static void dm_disable_vblank(struct drm_crtc *crtc);
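/* Enable or disable pageflip and vblank interrupts for every stream with
 * active planes in the given DC state; used to quiesce IRQs around GPU reset.
 */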
1740 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1741 struct dc_state *state, bool enable)
1743 enum dc_irq_source irq_source;
1744 struct amdgpu_crtc *acrtc;
1748 for (i = 0; i < state->stream_count; i++) {
1749 acrtc = get_crtc_by_otg_inst(
1750 adev, state->stream_status[i].primary_otg_inst);
1752 if (acrtc && state->stream_status[i].plane_count != 0) {
1753 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1754 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1755 DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1756 acrtc->crtc_id, enable ? "en" : "dis", rc);
1758 DRM_WARN("Failed to %s pflip interrupts\n",
1759 enable ? "enable" : "disable");
1762 rc = dm_enable_vblank(&acrtc->base);
1764 DRM_WARN("Failed to enable vblank interrupts\n");
1766 dm_disable_vblank(&acrtc->base);
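/* Build a copy of the current DC state with every stream and its planes
 * removed and commit it, blanking all displays.
 */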
1774 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1776 struct dc_state *context = NULL;
1777 enum dc_status res = DC_ERROR_UNEXPECTED;
1779 struct dc_stream_state *del_streams[MAX_PIPES];
1780 int del_streams_count = 0;
1782 memset(del_streams, 0, sizeof(del_streams));
1784 context = dc_create_state(dc);
1785 if (context == NULL)
1786 goto context_alloc_fail;
1788 dc_resource_state_copy_construct_current(dc, context);
1790 /* First remove from context all streams */
1791 for (i = 0; i < context->stream_count; i++) {
1792 struct dc_stream_state *stream = context->streams[i];
1794 del_streams[del_streams_count++] = stream;
1797 /* Remove all planes for removed streams and then remove the streams */
1798 for (i = 0; i < del_streams_count; i++) {
1799 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1800 res = DC_FAIL_DETACH_SURFACES;
1804 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1810 res = dc_validate_global_state(dc, context, false);
1813 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1817 res = dc_commit_state(dc, context);
1820 dc_release_state(context);
1826 static int dm_suspend(void *handle)
1828 struct amdgpu_device *adev = handle;
1829 struct amdgpu_display_manager *dm = &adev->dm;
1832 if (amdgpu_in_reset(adev)) {
1833 mutex_lock(&dm->dc_lock);
1835 #if defined(CONFIG_DRM_AMD_DC_DCN)
1836 dc_allow_idle_optimizations(adev->dm.dc, false);
1839 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1841 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1843 amdgpu_dm_commit_zero_streams(dm->dc);
1845 amdgpu_dm_irq_suspend(adev);
1850 WARN_ON(adev->dm.cached_state);
1851 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1853 s3_handle_mst(adev_to_drm(adev), true);
1855 amdgpu_dm_irq_suspend(adev);
1858 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1863 static struct amdgpu_dm_connector *
1864 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1865 struct drm_crtc *crtc)
1868 struct drm_connector_state *new_con_state;
1869 struct drm_connector *connector;
1870 struct drm_crtc *crtc_from_state;
1872 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1873 crtc_from_state = new_con_state->crtc;
1875 if (crtc_from_state == crtc)
1876 return to_amdgpu_dm_connector(connector);
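/* Emulate link detection for a force-enabled connector: create a sink of the
 * matching signal type and read the EDID locally.
 */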
1882 static void emulated_link_detect(struct dc_link *link)
1884 struct dc_sink_init_data sink_init_data = { 0 };
1885 struct display_sink_capability sink_caps = { 0 };
1886 enum dc_edid_status edid_status;
1887 struct dc_context *dc_ctx = link->ctx;
1888 struct dc_sink *sink = NULL;
1889 struct dc_sink *prev_sink = NULL;
1891 link->type = dc_connection_none;
1892 prev_sink = link->local_sink;
1895 dc_sink_release(prev_sink);
1897 switch (link->connector_signal) {
1898 case SIGNAL_TYPE_HDMI_TYPE_A: {
1899 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1900 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1904 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1905 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1906 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1910 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1911 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1912 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1916 case SIGNAL_TYPE_LVDS: {
1917 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1918 sink_caps.signal = SIGNAL_TYPE_LVDS;
1922 case SIGNAL_TYPE_EDP: {
1923 sink_caps.transaction_type =
1924 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1925 sink_caps.signal = SIGNAL_TYPE_EDP;
1929 case SIGNAL_TYPE_DISPLAY_PORT: {
1930 sink_caps.transaction_type =
1931 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1932 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1937 DC_ERROR("Invalid connector type! signal:%d\n",
1938 link->connector_signal);
1942 sink_init_data.link = link;
1943 sink_init_data.sink_signal = sink_caps.signal;
1945 sink = dc_sink_create(&sink_init_data);
1947 DC_ERROR("Failed to create sink!\n");
1951 /* dc_sink_create returns a new reference */
1952 link->local_sink = sink;
1954 edid_status = dm_helpers_read_local_edid(
1959 if (edid_status != EDID_OK)
1960 DC_ERROR("Failed to read EDID");
1964 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1965 struct amdgpu_display_manager *dm)
1968 struct dc_surface_update surface_updates[MAX_SURFACES];
1969 struct dc_plane_info plane_infos[MAX_SURFACES];
1970 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1971 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1972 struct dc_stream_update stream_update;
1976 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1979 dm_error("Failed to allocate update bundle\n");
1983 for (k = 0; k < dc_state->stream_count; k++) {
1984 bundle->stream_update.stream = dc_state->streams[k];
1986 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1987 bundle->surface_updates[m].surface =
1988 dc_state->stream_status->plane_states[m];
1989 bundle->surface_updates[m].surface->force_full_update =
1992 dc_commit_updates_for_stream(
1993 dm->dc, bundle->surface_updates,
1994 dc_state->stream_status->plane_count,
1995 dc_state->streams[k], &bundle->stream_update, dc_state);
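/* Turn off the stream on a link (dpms_off) by committing a minimal stream
 * update through DC.
 */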
2004 static void dm_set_dpms_off(struct dc_link *link)
2006 struct dc_stream_state *stream_state;
2007 struct amdgpu_dm_connector *aconnector = link->priv;
2008 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2009 struct dc_stream_update stream_update;
2010 bool dpms_off = true;
2012 memset(&stream_update, 0, sizeof(stream_update));
2013 stream_update.dpms_off = &dpms_off;
2015 mutex_lock(&adev->dm.dc_lock);
2016 stream_state = dc_stream_find_from_link(link);
2018 if (stream_state == NULL) {
2019 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2020 mutex_unlock(&adev->dm.dc_lock);
2024 stream_update.stream = stream_state;
2025 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2026 stream_state, &stream_update,
2027 stream_state->ctx->dc->current_state);
2028 mutex_unlock(&adev->dm.dc_lock);
2031 static int dm_resume(void *handle)
2033 struct amdgpu_device *adev = handle;
2034 struct drm_device *ddev = adev_to_drm(adev);
2035 struct amdgpu_display_manager *dm = &adev->dm;
2036 struct amdgpu_dm_connector *aconnector;
2037 struct drm_connector *connector;
2038 struct drm_connector_list_iter iter;
2039 struct drm_crtc *crtc;
2040 struct drm_crtc_state *new_crtc_state;
2041 struct dm_crtc_state *dm_new_crtc_state;
2042 struct drm_plane *plane;
2043 struct drm_plane_state *new_plane_state;
2044 struct dm_plane_state *dm_new_plane_state;
2045 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2046 enum dc_connection_type new_connection_type = dc_connection_none;
2047 struct dc_state *dc_state;
2050 if (amdgpu_in_reset(adev)) {
2051 dc_state = dm->cached_dc_state;
2053 r = dm_dmub_hw_init(adev);
2055 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2057 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2060 amdgpu_dm_irq_resume_early(adev);
2062 for (i = 0; i < dc_state->stream_count; i++) {
2063 dc_state->streams[i]->mode_changed = true;
2064 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2065 dc_state->stream_status->plane_states[j]->update_flags.raw
2070 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2072 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2074 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2076 dc_release_state(dm->cached_dc_state);
2077 dm->cached_dc_state = NULL;
2079 amdgpu_dm_irq_resume_late(adev);
2081 mutex_unlock(&dm->dc_lock);
2085 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2086 dc_release_state(dm_state->context);
2087 dm_state->context = dc_create_state(dm->dc);
2088 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2089 dc_resource_state_construct(dm->dc, dm_state->context);
2091 /* Before powering on DC we need to re-initialize DMUB. */
2092 r = dm_dmub_hw_init(adev);
2094 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2096 /* power on hardware */
2097 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2099 /* program HPD filter */
2103 * Early enable HPD Rx IRQ; this should be done before setting the mode,
2104 * as short pulse interrupts are used for MST.
2106 amdgpu_dm_irq_resume_early(adev);
2108 /* On resume we need to rewrite the MSTM control bits to enable MST */
2109 s3_handle_mst(ddev, false);
2112 drm_connector_list_iter_begin(ddev, &iter);
2113 drm_for_each_connector_iter(connector, &iter) {
2114 aconnector = to_amdgpu_dm_connector(connector);
2117 * this is the case when traversing through already created
2118 * MST connectors, should be skipped
2120 if (aconnector->mst_port)
2123 mutex_lock(&aconnector->hpd_lock);
2124 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2125 DRM_ERROR("KMS: Failed to detect connector\n");
2127 if (aconnector->base.force && new_connection_type == dc_connection_none)
2128 emulated_link_detect(aconnector->dc_link);
2130 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2132 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2133 aconnector->fake_enable = false;
2135 if (aconnector->dc_sink)
2136 dc_sink_release(aconnector->dc_sink);
2137 aconnector->dc_sink = NULL;
2138 amdgpu_dm_update_connector_after_detect(aconnector);
2139 mutex_unlock(&aconnector->hpd_lock);
2141 drm_connector_list_iter_end(&iter);
2143 /* Force mode set in atomic commit */
2144 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2145 new_crtc_state->active_changed = true;
2148 * atomic_check is expected to create the dc states. We need to release
2149 * them here, since they were duplicated as part of the suspend procedure.
2152 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2153 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2154 if (dm_new_crtc_state->stream) {
2155 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2156 dc_stream_release(dm_new_crtc_state->stream);
2157 dm_new_crtc_state->stream = NULL;
2161 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2162 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2163 if (dm_new_plane_state->dc_state) {
2164 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2165 dc_plane_state_release(dm_new_plane_state->dc_state);
2166 dm_new_plane_state->dc_state = NULL;
2170 drm_atomic_helper_resume(ddev, dm->cached_state);
2172 dm->cached_state = NULL;
2174 amdgpu_dm_irq_resume_late(adev);
2176 amdgpu_dm_smu_write_watermarks_table(adev);
2184 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2185 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2186 * the base driver's device list to be initialized and torn down accordingly.
2188 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2191 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2193 .early_init = dm_early_init,
2194 .late_init = dm_late_init,
2195 .sw_init = dm_sw_init,
2196 .sw_fini = dm_sw_fini,
2197 .hw_init = dm_hw_init,
2198 .hw_fini = dm_hw_fini,
2199 .suspend = dm_suspend,
2200 .resume = dm_resume,
2201 .is_idle = dm_is_idle,
2202 .wait_for_idle = dm_wait_for_idle,
2203 .check_soft_reset = dm_check_soft_reset,
2204 .soft_reset = dm_soft_reset,
2205 .set_clockgating_state = dm_set_clockgating_state,
2206 .set_powergating_state = dm_set_powergating_state,
2209 const struct amdgpu_ip_block_version dm_ip_block =
2211 .type = AMD_IP_BLOCK_TYPE_DCE,
2215 .funcs = &amdgpu_dm_funcs,
2225 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2226 .fb_create = amdgpu_display_user_framebuffer_create,
2227 .get_format_info = amd_get_format_info,
2228 .output_poll_changed = drm_fb_helper_output_poll_changed,
2229 .atomic_check = amdgpu_dm_atomic_check,
2230 .atomic_commit = drm_atomic_helper_commit,
2233 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2234 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2237 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2239 u32 max_cll, min_cll, max, min, q, r;
2240 struct amdgpu_dm_backlight_caps *caps;
2241 struct amdgpu_display_manager *dm;
2242 struct drm_connector *conn_base;
2243 struct amdgpu_device *adev;
2244 struct dc_link *link = NULL;
2245 static const u8 pre_computed_values[] = {
2246 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2247 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2249 if (!aconnector || !aconnector->dc_link)
2252 link = aconnector->dc_link;
2253 if (link->connector_signal != SIGNAL_TYPE_EDP)
2256 conn_base = &aconnector->base;
2257 adev = drm_to_adev(conn_base->dev);
2259 caps = &dm->backlight_caps;
2260 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2261 caps->aux_support = false;
2262 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2263 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2265 if (caps->ext_caps->bits.oled == 1 ||
2266 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2267 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2268 caps->aux_support = true;
2270 /* From the specification (CTA-861-G), for calculating the maximum
2271 * luminance we need to use:
2272 * Luminance = 50*2**(CV/32)
2273 * Where CV is a one-byte value.
2274 * Evaluating this expression would require floating-point precision;
2275 * to avoid that complexity, we take advantage of the fact that CV is
2276 * divided by a constant. From Euclid's division algorithm, we know that
2277 * CV can be written as: CV = 32*q + r. Substituting this into the
2278 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need to
2279 * pre-compute the 50*(2**(r/32)) term. The values were pre-computed with
2280 * the following Ruby line:
2281 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2282 * The results of the above expressions can be verified at
2283 * pre_computed_values.
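 * As a worked example (illustrative only): max_cll = 100 decomposes as
 * q = 3, r = 4, so max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440,
 * close to the exact 50 * 2^(100/32) ~= 436 nits.
 */

/*
 * Minimal sketch of the decomposition described above (CV = 32*q + r),
 * assuming max_cll holds the one-byte CV value consumed by the lookup below.
 */
q = max_cll >> 5;
r = max_cll % 32;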
2287 max = (1 << q) * pre_computed_values[r];
2289 // min luminance: maxLum * (CV/255)^2 / 100
2290 q = DIV_ROUND_CLOSEST(min_cll, 255);
2291 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2293 caps->aux_max_input_signal = max;
2294 caps->aux_min_input_signal = min;
2297 void amdgpu_dm_update_connector_after_detect(
2298 struct amdgpu_dm_connector *aconnector)
2300 struct drm_connector *connector = &aconnector->base;
2301 struct drm_device *dev = connector->dev;
2302 struct dc_sink *sink;
2304 /* MST handled by drm_mst framework */
2305 if (aconnector->mst_mgr.mst_state == true)
2308 sink = aconnector->dc_link->local_sink;
2310 dc_sink_retain(sink);
2313 * An EDID-managed connector gets its first update only in the mode_valid
2314 * hook; the connector sink is then set to either the fake or the physical
2315 * sink, depending on link status. Skip if already done during boot.
2317 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2318 && aconnector->dc_em_sink) {
2321 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
2322 * stream, because on resume connector->sink is set to NULL.
2324 mutex_lock(&dev->mode_config.mutex);
2327 if (aconnector->dc_sink) {
2328 amdgpu_dm_update_freesync_caps(connector, NULL);
2330 * The retain and release below bump up the refcount for the sink
2331 * because the link doesn't point to it anymore after disconnect;
2332 * otherwise, on the next crtc-to-connector reshuffle by the UMD we
2333 * would run into an unwanted dc_sink release.
2335 dc_sink_release(aconnector->dc_sink);
2337 aconnector->dc_sink = sink;
2338 dc_sink_retain(aconnector->dc_sink);
2339 amdgpu_dm_update_freesync_caps(connector,
2342 amdgpu_dm_update_freesync_caps(connector, NULL);
2343 if (!aconnector->dc_sink) {
2344 aconnector->dc_sink = aconnector->dc_em_sink;
2345 dc_sink_retain(aconnector->dc_sink);
2349 mutex_unlock(&dev->mode_config.mutex);
2352 dc_sink_release(sink);
2357 * TODO: temporary guard while looking for a proper fix.
2358 * If this sink is an MST sink, we should not do anything.
2360 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2361 dc_sink_release(sink);
2365 if (aconnector->dc_sink == sink) {
2367 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2370 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2371 aconnector->connector_id);
2373 dc_sink_release(sink);
2377 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2378 aconnector->connector_id, aconnector->dc_sink, sink);
2380 mutex_lock(&dev->mode_config.mutex);
2383 * 1. Update status of the drm connector
2384 * 2. Send an event and let userspace tell us what to do
2388 * TODO: check if we still need the S3 mode update workaround.
2389 * If yes, put it here.
2391 if (aconnector->dc_sink) {
2392 amdgpu_dm_update_freesync_caps(connector, NULL);
2393 dc_sink_release(aconnector->dc_sink);
2396 aconnector->dc_sink = sink;
2397 dc_sink_retain(aconnector->dc_sink);
2398 if (sink->dc_edid.length == 0) {
2399 aconnector->edid = NULL;
2400 if (aconnector->dc_link->aux_mode) {
2401 drm_dp_cec_unset_edid(
2402 &aconnector->dm_dp_aux.aux);
2406 (struct edid *)sink->dc_edid.raw_edid;
2408 drm_connector_update_edid_property(connector,
2410 if (aconnector->dc_link->aux_mode)
2411 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2415 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2416 update_connector_ext_caps(aconnector);
2418 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2419 amdgpu_dm_update_freesync_caps(connector, NULL);
2420 drm_connector_update_edid_property(connector, NULL);
2421 aconnector->num_modes = 0;
2422 dc_sink_release(aconnector->dc_sink);
2423 aconnector->dc_sink = NULL;
2424 aconnector->edid = NULL;
2425 #ifdef CONFIG_DRM_AMD_DC_HDCP
2426 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2427 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2428 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2432 mutex_unlock(&dev->mode_config.mutex);
2434 update_subconnector_property(aconnector);
2437 dc_sink_release(sink);
2440 static void handle_hpd_irq(void *param)
2442 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2443 struct drm_connector *connector = &aconnector->base;
2444 struct drm_device *dev = connector->dev;
2445 enum dc_connection_type new_connection_type = dc_connection_none;
2446 #ifdef CONFIG_DRM_AMD_DC_HDCP
2447 struct amdgpu_device *adev = drm_to_adev(dev);
2448 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2452 * In case of failure or MST there is no need to update the connector status
2453 * or notify the OS, since (in the MST case) MST does this in its own context.
2455 mutex_lock(&aconnector->hpd_lock);
2457 #ifdef CONFIG_DRM_AMD_DC_HDCP
2458 if (adev->dm.hdcp_workqueue) {
2459 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2460 dm_con_state->update_hdcp = true;
2463 if (aconnector->fake_enable)
2464 aconnector->fake_enable = false;
2466 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2467 DRM_ERROR("KMS: Failed to detect connector\n");
2469 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2470 emulated_link_detect(aconnector->dc_link);
2473 drm_modeset_lock_all(dev);
2474 dm_restore_drm_connector_state(dev, connector);
2475 drm_modeset_unlock_all(dev);
2477 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2478 drm_kms_helper_hotplug_event(dev);
2480 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2481 if (new_connection_type == dc_connection_none &&
2482 aconnector->dc_link->type == dc_connection_none)
2483 dm_set_dpms_off(aconnector->dc_link);
2485 amdgpu_dm_update_connector_after_detect(aconnector);
2487 drm_modeset_lock_all(dev);
2488 dm_restore_drm_connector_state(dev, connector);
2489 drm_modeset_unlock_all(dev);
2491 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2492 drm_kms_helper_hotplug_event(dev);
2494 mutex_unlock(&aconnector->hpd_lock);
2498 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2500 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2502 bool new_irq_handled = false;
2504 int dpcd_bytes_to_read;
2506 const int max_process_count = 30;
2507 int process_count = 0;
2509 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2511 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2512 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2513 /* DPCD 0x200 - 0x201 for downstream IRQ */
2514 dpcd_addr = DP_SINK_COUNT;
2516 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2517 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2518 dpcd_addr = DP_SINK_COUNT_ESI;
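/*
 * Size check (informational): DP_PSR_ERROR_STATUS (0x2006) minus
 * DP_SINK_COUNT_ESI (0x2002) is 4 bytes, i.e. exactly the 0x2002 - 0x2005
 * window mentioned above, while the pre-1.2 branch reads
 * DP_LANE0_1_STATUS (0x202) - DP_SINK_COUNT (0x200) = 2 bytes.
 */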
2521 dret = drm_dp_dpcd_read(
2522 &aconnector->dm_dp_aux.aux,
2525 dpcd_bytes_to_read);
2527 while (dret == dpcd_bytes_to_read &&
2528 process_count < max_process_count) {
2534 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2535 /* handle HPD short pulse irq */
2536 if (aconnector->mst_mgr.mst_state)
2538 &aconnector->mst_mgr,
2542 if (new_irq_handled) {
2543 /* ACK at DPCD to notify downstream */
2544 const int ack_dpcd_bytes_to_write =
2545 dpcd_bytes_to_read - 1;
2547 for (retry = 0; retry < 3; retry++) {
2550 wret = drm_dp_dpcd_write(
2551 &aconnector->dm_dp_aux.aux,
2554 ack_dpcd_bytes_to_write);
2555 if (wret == ack_dpcd_bytes_to_write)
2559 /* check if there is a new irq to be handled */
2560 dret = drm_dp_dpcd_read(
2561 &aconnector->dm_dp_aux.aux,
2564 dpcd_bytes_to_read);
2566 new_irq_handled = false;
2572 if (process_count == max_process_count)
2573 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2576 static void handle_hpd_rx_irq(void *param)
2578 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2579 struct drm_connector *connector = &aconnector->base;
2580 struct drm_device *dev = connector->dev;
2581 struct dc_link *dc_link = aconnector->dc_link;
2582 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2583 bool result = false;
2584 enum dc_connection_type new_connection_type = dc_connection_none;
2585 struct amdgpu_device *adev = drm_to_adev(dev);
2586 union hpd_irq_data hpd_irq_data;
2588 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2591 * TODO: Temporary mutex to keep the hpd interrupt from running into a gpio
2592 * conflict; once the i2c helper is implemented, this mutex should be retired.
2595 if (dc_link->type != dc_connection_mst_branch)
2596 mutex_lock(&aconnector->hpd_lock);
2598 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2600 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2601 (dc_link->type == dc_connection_mst_branch)) {
2602 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2604 dm_handle_hpd_rx_irq(aconnector);
2606 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2608 dm_handle_hpd_rx_irq(aconnector);
2613 mutex_lock(&adev->dm.dc_lock);
2614 #ifdef CONFIG_DRM_AMD_DC_HDCP
2615 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2617 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2619 mutex_unlock(&adev->dm.dc_lock);
2622 if (result && !is_mst_root_connector) {
2623 /* Downstream Port status changed. */
2624 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2625 DRM_ERROR("KMS: Failed to detect connector\n");
2627 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2628 emulated_link_detect(dc_link);
2630 if (aconnector->fake_enable)
2631 aconnector->fake_enable = false;
2633 amdgpu_dm_update_connector_after_detect(aconnector);
2636 drm_modeset_lock_all(dev);
2637 dm_restore_drm_connector_state(dev, connector);
2638 drm_modeset_unlock_all(dev);
2640 drm_kms_helper_hotplug_event(dev);
2641 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2643 if (aconnector->fake_enable)
2644 aconnector->fake_enable = false;
2646 amdgpu_dm_update_connector_after_detect(aconnector);
2649 drm_modeset_lock_all(dev);
2650 dm_restore_drm_connector_state(dev, connector);
2651 drm_modeset_unlock_all(dev);
2653 drm_kms_helper_hotplug_event(dev);
2656 #ifdef CONFIG_DRM_AMD_DC_HDCP
2657 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2658 if (adev->dm.hdcp_workqueue)
2659 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2663 if (dc_link->type != dc_connection_mst_branch) {
2664 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2665 mutex_unlock(&aconnector->hpd_lock);
2669 static void register_hpd_handlers(struct amdgpu_device *adev)
2671 struct drm_device *dev = adev_to_drm(adev);
2672 struct drm_connector *connector;
2673 struct amdgpu_dm_connector *aconnector;
2674 const struct dc_link *dc_link;
2675 struct dc_interrupt_params int_params = {0};
2677 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2678 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2680 list_for_each_entry(connector,
2681 &dev->mode_config.connector_list, head) {
2683 aconnector = to_amdgpu_dm_connector(connector);
2684 dc_link = aconnector->dc_link;
2686 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2687 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2688 int_params.irq_source = dc_link->irq_source_hpd;
2690 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2692 (void *) aconnector);
2695 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2697 /* Also register for DP short pulse (hpd_rx). */
2698 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2699 int_params.irq_source = dc_link->irq_source_hpd_rx;
2701 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2703 (void *) aconnector);
2708 #if defined(CONFIG_DRM_AMD_DC_SI)
2709 /* Register IRQ sources and initialize IRQ callbacks */
2710 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2712 struct dc *dc = adev->dm.dc;
2713 struct common_irq_params *c_irq_params;
2714 struct dc_interrupt_params int_params = {0};
2717 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2719 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2720 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2723 * Actions of amdgpu_irq_add_id():
2724 * 1. Register a set() function with base driver.
2725 * Base driver will call set() function to enable/disable an
2726 * interrupt in DC hardware.
2727 * 2. Register amdgpu_dm_irq_handler().
2728 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2729 * coming from DC hardware.
2730 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2731 * for acknowledging and handling. */
2733 /* Use VBLANK interrupt */
2734 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2735 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2737 DRM_ERROR("Failed to add crtc irq id!\n");
2741 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2742 int_params.irq_source =
2743 dc_interrupt_to_irq_source(dc, i+1 , 0);
2745 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2747 c_irq_params->adev = adev;
2748 c_irq_params->irq_src = int_params.irq_source;
2750 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2751 dm_crtc_high_irq, c_irq_params);
2754 /* Use GRPH_PFLIP interrupt */
2755 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2756 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2757 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2759 DRM_ERROR("Failed to add page flip irq id!\n");
2763 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2764 int_params.irq_source =
2765 dc_interrupt_to_irq_source(dc, i, 0);
2767 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2769 c_irq_params->adev = adev;
2770 c_irq_params->irq_src = int_params.irq_source;
2772 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2773 dm_pflip_high_irq, c_irq_params);
2778 r = amdgpu_irq_add_id(adev, client_id,
2779 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2781 DRM_ERROR("Failed to add hpd irq id!\n");
2785 register_hpd_handlers(adev);
2791 /* Register IRQ sources and initialize IRQ callbacks */
2792 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2794 struct dc *dc = adev->dm.dc;
2795 struct common_irq_params *c_irq_params;
2796 struct dc_interrupt_params int_params = {0};
2799 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2801 if (adev->asic_type >= CHIP_VEGA10)
2802 client_id = SOC15_IH_CLIENTID_DCE;
2804 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2805 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2808 * Actions of amdgpu_irq_add_id():
2809 * 1. Register a set() function with base driver.
2810 * Base driver will call set() function to enable/disable an
2811 * interrupt in DC hardware.
2812 * 2. Register amdgpu_dm_irq_handler().
2813 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2814 * coming from DC hardware.
2815 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2816 * for acknowledging and handling. */
2818 /* Use VBLANK interrupt */
2819 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2820 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2822 DRM_ERROR("Failed to add crtc irq id!\n");
2826 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2827 int_params.irq_source =
2828 dc_interrupt_to_irq_source(dc, i, 0);
2830 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2832 c_irq_params->adev = adev;
2833 c_irq_params->irq_src = int_params.irq_source;
2835 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2836 dm_crtc_high_irq, c_irq_params);
2839 /* Use VUPDATE interrupt */
2840 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2841 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2843 DRM_ERROR("Failed to add vupdate irq id!\n");
2847 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2848 int_params.irq_source =
2849 dc_interrupt_to_irq_source(dc, i, 0);
2851 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2853 c_irq_params->adev = adev;
2854 c_irq_params->irq_src = int_params.irq_source;
2856 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2857 dm_vupdate_high_irq, c_irq_params);
2860 /* Use GRPH_PFLIP interrupt */
2861 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2862 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2863 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2865 DRM_ERROR("Failed to add page flip irq id!\n");
2869 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2870 int_params.irq_source =
2871 dc_interrupt_to_irq_source(dc, i, 0);
2873 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2875 c_irq_params->adev = adev;
2876 c_irq_params->irq_src = int_params.irq_source;
2878 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2879 dm_pflip_high_irq, c_irq_params);
2884 r = amdgpu_irq_add_id(adev, client_id,
2885 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2887 DRM_ERROR("Failed to add hpd irq id!\n");
2891 register_hpd_handlers(adev);
2896 #if defined(CONFIG_DRM_AMD_DC_DCN)
2897 /* Register IRQ sources and initialize IRQ callbacks */
2898 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2900 struct dc *dc = adev->dm.dc;
2901 struct common_irq_params *c_irq_params;
2902 struct dc_interrupt_params int_params = {0};
2906 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2907 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2910 * Actions of amdgpu_irq_add_id():
2911 * 1. Register a set() function with base driver.
2912 * Base driver will call set() function to enable/disable an
2913 * interrupt in DC hardware.
2914 * 2. Register amdgpu_dm_irq_handler().
2915 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2916 * coming from DC hardware.
2917 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2918 * for acknowledging and handling.
2921 /* Use VSTARTUP interrupt */
2922 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2923 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2925 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2928 DRM_ERROR("Failed to add crtc irq id!\n");
2932 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2933 int_params.irq_source =
2934 dc_interrupt_to_irq_source(dc, i, 0);
2936 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2938 c_irq_params->adev = adev;
2939 c_irq_params->irq_src = int_params.irq_source;
2941 amdgpu_dm_irq_register_interrupt(
2942 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2945 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2946 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2947 * to trigger at end of each vblank, regardless of state of the lock,
2948 * matching DCE behaviour.
2950 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2951 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2953 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2956 DRM_ERROR("Failed to add vupdate irq id!\n");
2960 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2961 int_params.irq_source =
2962 dc_interrupt_to_irq_source(dc, i, 0);
2964 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2966 c_irq_params->adev = adev;
2967 c_irq_params->irq_src = int_params.irq_source;
2969 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2970 dm_vupdate_high_irq, c_irq_params);
2973 /* Use GRPH_PFLIP interrupt */
2974 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2975 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2977 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2979 DRM_ERROR("Failed to add page flip irq id!\n");
2983 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2984 int_params.irq_source =
2985 dc_interrupt_to_irq_source(dc, i, 0);
2987 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2989 c_irq_params->adev = adev;
2990 c_irq_params->irq_src = int_params.irq_source;
2992 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2993 dm_pflip_high_irq, c_irq_params);
2998 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3001 DRM_ERROR("Failed to add hpd irq id!\n");
3005 register_hpd_handlers(adev);
3012 * Acquires the lock for the atomic state object and returns
3013 * the new atomic state.
3015 * This should only be called during atomic check.
3017 static int dm_atomic_get_state(struct drm_atomic_state *state,
3018 struct dm_atomic_state **dm_state)
3020 struct drm_device *dev = state->dev;
3021 struct amdgpu_device *adev = drm_to_adev(dev);
3022 struct amdgpu_display_manager *dm = &adev->dm;
3023 struct drm_private_state *priv_state;
3028 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3029 if (IS_ERR(priv_state))
3030 return PTR_ERR(priv_state);
3032 *dm_state = to_dm_atomic_state(priv_state);
3037 static struct dm_atomic_state *
3038 dm_atomic_get_new_state(struct drm_atomic_state *state)
3040 struct drm_device *dev = state->dev;
3041 struct amdgpu_device *adev = drm_to_adev(dev);
3042 struct amdgpu_display_manager *dm = &adev->dm;
3043 struct drm_private_obj *obj;
3044 struct drm_private_state *new_obj_state;
3047 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3048 if (obj->funcs == dm->atomic_obj.funcs)
3049 return to_dm_atomic_state(new_obj_state);
3055 static struct drm_private_state *
3056 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3058 struct dm_atomic_state *old_state, *new_state;
3060 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3064 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3066 old_state = to_dm_atomic_state(obj->state);
3068 if (old_state && old_state->context)
3069 new_state->context = dc_copy_state(old_state->context);
3071 if (!new_state->context) {
3076 return &new_state->base;
3079 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3080 struct drm_private_state *state)
3082 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3084 if (dm_state && dm_state->context)
3085 dc_release_state(dm_state->context);
3090 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3091 .atomic_duplicate_state = dm_atomic_duplicate_state,
3092 .atomic_destroy_state = dm_atomic_destroy_state,
3095 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3097 struct dm_atomic_state *state;
3100 adev->mode_info.mode_config_initialized = true;
3102 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3103 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3105 adev_to_drm(adev)->mode_config.max_width = 16384;
3106 adev_to_drm(adev)->mode_config.max_height = 16384;
3108 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3109 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3110 /* indicates support for immediate flip */
3111 adev_to_drm(adev)->mode_config.async_page_flip = true;
3113 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3115 state = kzalloc(sizeof(*state), GFP_KERNEL);
3119 state->context = dc_create_state(adev->dm.dc);
3120 if (!state->context) {
3125 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3127 drm_atomic_private_obj_init(adev_to_drm(adev),
3128 &adev->dm.atomic_obj,
3130 &dm_atomic_state_funcs);
3132 r = amdgpu_display_modeset_create_props(adev);
3134 dc_release_state(state->context);
3139 r = amdgpu_dm_audio_init(adev);
3141 dc_release_state(state->context);
3149 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3150 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3151 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3153 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3154 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3156 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3158 #if defined(CONFIG_ACPI)
3159 struct amdgpu_dm_backlight_caps caps;
3161 memset(&caps, 0, sizeof(caps));
3163 if (dm->backlight_caps.caps_valid)
3166 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3167 if (caps.caps_valid) {
3168 dm->backlight_caps.caps_valid = true;
3169 if (caps.aux_support)
3171 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3172 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3174 dm->backlight_caps.min_input_signal =
3175 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3176 dm->backlight_caps.max_input_signal =
3177 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3180 if (dm->backlight_caps.aux_support)
3183 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3184 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3188 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3189 unsigned *min, unsigned *max)
3194 if (caps->aux_support) {
3195 // Firmware limits are in nits, DC API wants millinits.
3196 *max = 1000 * caps->aux_max_input_signal;
3197 *min = 1000 * caps->aux_min_input_signal;
3199 // Firmware limits are 8-bit, PWM control is 16-bit.
3200 *max = 0x101 * caps->max_input_signal;
3201 *min = 0x101 * caps->min_input_signal;
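/*
 * Scaling by 0x101 expands an 8-bit limit to the full 16-bit PWM range:
 * 0x101 * 0xFF == 0xFFFF, so a max_input_signal of 255 maps to 65535.
 */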
3206 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3207 uint32_t brightness)
3211 if (!get_brightness_range(caps, &min, &max))
3214 // Rescale 0..255 to min..max
3215 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3216 AMDGPU_MAX_BL_LEVEL);
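/*
 * Worked example (using the default caps above, PWM path): min_input_signal
 * = 12 and max_input_signal = 255 give min = 0x101 * 12 = 3084 and
 * max = 65535, so a user brightness of 0 maps to 3084 (never fully dark)
 * and 255 maps to 65535.
 */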
3219 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3220 uint32_t brightness)
3224 if (!get_brightness_range(caps, &min, &max))
3227 if (brightness < min)
3229 // Rescale min..max to 0..255
3230 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3234 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3236 struct amdgpu_display_manager *dm = bl_get_data(bd);
3237 struct amdgpu_dm_backlight_caps caps;
3238 struct dc_link *link = NULL;
3242 amdgpu_dm_update_backlight_caps(dm);
3243 caps = dm->backlight_caps;
3245 link = (struct dc_link *)dm->backlight_link;
3247 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3248 // Change brightness based on AUX property
3249 if (caps.aux_support)
3250 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3251 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3253 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3258 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3260 struct amdgpu_display_manager *dm = bl_get_data(bd);
3261 struct amdgpu_dm_backlight_caps caps;
3263 amdgpu_dm_update_backlight_caps(dm);
3264 caps = dm->backlight_caps;
3266 if (caps.aux_support) {
3267 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3271 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3273 return bd->props.brightness;
3274 return convert_brightness_to_user(&caps, avg);
3276 int ret = dc_link_get_backlight_level(dm->backlight_link);
3278 if (ret == DC_ERROR_UNEXPECTED)
3279 return bd->props.brightness;
3280 return convert_brightness_to_user(&caps, ret);
3284 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3285 .options = BL_CORE_SUSPENDRESUME,
3286 .get_brightness = amdgpu_dm_backlight_get_brightness,
3287 .update_status = amdgpu_dm_backlight_update_status,
3291 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3294 struct backlight_properties props = { 0 };
3296 amdgpu_dm_update_backlight_caps(dm);
3298 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3299 props.brightness = AMDGPU_MAX_BL_LEVEL;
3300 props.type = BACKLIGHT_RAW;
3302 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3303 adev_to_drm(dm->adev)->primary->index);
3305 dm->backlight_dev = backlight_device_register(bl_name,
3306 adev_to_drm(dm->adev)->dev,
3308 &amdgpu_dm_backlight_ops,
3311 if (IS_ERR(dm->backlight_dev))
3312 DRM_ERROR("DM: Backlight registration failed!\n");
3314 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3319 static int initialize_plane(struct amdgpu_display_manager *dm,
3320 struct amdgpu_mode_info *mode_info, int plane_id,
3321 enum drm_plane_type plane_type,
3322 const struct dc_plane_cap *plane_cap)
3324 struct drm_plane *plane;
3325 unsigned long possible_crtcs;
3328 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3330 DRM_ERROR("KMS: Failed to allocate plane\n");
3333 plane->type = plane_type;
3336 * HACK: IGT tests expect that the primary plane for a CRTC
3337 * can only have one possible CRTC. Only expose support for
3338 * any CRTC if they're not going to be used as a primary plane
3339 * for a CRTC - like overlay or underlay planes.
3341 possible_crtcs = 1 << plane_id;
3342 if (plane_id >= dm->dc->caps.max_streams)
3343 possible_crtcs = 0xff;
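/*
 * Example (hypothetical max_streams of 4): primary planes 0-3 get the masks
 * 0x1, 0x2, 0x4 and 0x8 (one CRTC each), while planes with plane_id >= 4
 * advertise 0xff and may attach to any CRTC.
 */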
3345 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3348 DRM_ERROR("KMS: Failed to initialize plane\n");
3354 mode_info->planes[plane_id] = plane;
3360 static void register_backlight_device(struct amdgpu_display_manager *dm,
3361 struct dc_link *link)
3363 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3364 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3366 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3367 link->type != dc_connection_none) {
3369 * Even if registration fails, we should continue with
3370 * DM initialization, because not having backlight control
3371 * is better than a black screen.
3373 amdgpu_dm_register_backlight_device(dm);
3375 if (dm->backlight_dev)
3376 dm->backlight_link = link;
3383 * In this architecture, the association
3384 * connector -> encoder -> crtc
3385 * is not really required. The crtc and connector will hold the
3386 * display_index as an abstraction to use with the DAL component.
3388 * Returns 0 on success
3390 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3392 struct amdgpu_display_manager *dm = &adev->dm;
3394 struct amdgpu_dm_connector *aconnector = NULL;
3395 struct amdgpu_encoder *aencoder = NULL;
3396 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3398 int32_t primary_planes;
3399 enum dc_connection_type new_connection_type = dc_connection_none;
3400 const struct dc_plane_cap *plane;
3402 dm->display_indexes_num = dm->dc->caps.max_streams;
3403 /* Update the actually used number of CRTCs */
3404 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3406 link_cnt = dm->dc->caps.max_links;
3407 if (amdgpu_dm_mode_config_init(dm->adev)) {
3408 DRM_ERROR("DM: Failed to initialize mode config\n");
3412 /* There is one primary plane per CRTC */
3413 primary_planes = dm->dc->caps.max_streams;
3414 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3417 * Initialize primary planes, implicit planes for legacy IOCTLS.
3418 * Order is reversed to match iteration order in atomic check.
3420 for (i = (primary_planes - 1); i >= 0; i--) {
3421 plane = &dm->dc->caps.planes[i];
3423 if (initialize_plane(dm, mode_info, i,
3424 DRM_PLANE_TYPE_PRIMARY, plane)) {
3425 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3431 * Initialize overlay planes, index starting after primary planes.
3432 * These planes have a higher DRM index than the primary planes since
3433 * they should be considered as having a higher z-order.
3434 * Order is reversed to match iteration order in atomic check.
3436 * Only support DCN for now, and only expose one so we don't encourage
3437 * userspace to use up all the pipes.
3439 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3440 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3442 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3445 if (!plane->blends_with_above || !plane->blends_with_below)
3448 if (!plane->pixel_format_support.argb8888)
3451 if (initialize_plane(dm, NULL, primary_planes + i,
3452 DRM_PLANE_TYPE_OVERLAY, plane)) {
3453 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3457 /* Only create one overlay plane. */
3461 for (i = 0; i < dm->dc->caps.max_streams; i++)
3462 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3463 DRM_ERROR("KMS: Failed to initialize crtc\n");
3467 /* loops over all connectors on the board */
3468 for (i = 0; i < link_cnt; i++) {
3469 struct dc_link *link = NULL;
3471 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3473 "KMS: Cannot support more than %d display indexes\n",
3474 AMDGPU_DM_MAX_DISPLAY_INDEX);
3478 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3482 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3486 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3487 DRM_ERROR("KMS: Failed to initialize encoder\n");
3491 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3492 DRM_ERROR("KMS: Failed to initialize connector\n");
3496 link = dc_get_link_at_index(dm->dc, i);
3498 if (!dc_link_detect_sink(link, &new_connection_type))
3499 DRM_ERROR("KMS: Failed to detect connector\n");
3501 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3502 emulated_link_detect(link);
3503 amdgpu_dm_update_connector_after_detect(aconnector);
3505 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3506 amdgpu_dm_update_connector_after_detect(aconnector);
3507 register_backlight_device(dm, link);
3508 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3509 amdgpu_dm_set_psr_caps(link);
3515 /* Software is initialized. Now we can register interrupt handlers. */
3516 switch (adev->asic_type) {
3517 #if defined(CONFIG_DRM_AMD_DC_SI)
3522 if (dce60_register_irq_handlers(dm->adev)) {
3523 DRM_ERROR("DM: Failed to initialize IRQ\n");
3537 case CHIP_POLARIS11:
3538 case CHIP_POLARIS10:
3539 case CHIP_POLARIS12:
3544 if (dce110_register_irq_handlers(dm->adev)) {
3545 DRM_ERROR("DM: Failed to initialize IRQ\n");
3549 #if defined(CONFIG_DRM_AMD_DC_DCN)
3555 case CHIP_SIENNA_CICHLID:
3556 case CHIP_NAVY_FLOUNDER:
3557 case CHIP_DIMGREY_CAVEFISH:
3559 if (dcn10_register_irq_handlers(dm->adev)) {
3560 DRM_ERROR("DM: Failed to initialize IRQ\n");
3566 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3578 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3580 drm_mode_config_cleanup(dm->ddev);
3581 drm_atomic_private_obj_fini(&dm->atomic_obj);
3585 /******************************************************************************
3586 * amdgpu_display_funcs functions
3587 *****************************************************************************/
3590 * dm_bandwidth_update - program display watermarks
3592 * @adev: amdgpu_device pointer
3594 * Calculate and program the display watermarks and line buffer allocation.
3596 static void dm_bandwidth_update(struct amdgpu_device *adev)
3598 /* TODO: implement later */
3601 static const struct amdgpu_display_funcs dm_display_funcs = {
3602 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3603 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3604 .backlight_set_level = NULL, /* never called for DC */
3605 .backlight_get_level = NULL, /* never called for DC */
3606 .hpd_sense = NULL,/* called unconditionally */
3607 .hpd_set_polarity = NULL, /* called unconditionally */
3608 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3609 .page_flip_get_scanoutpos =
3610 dm_crtc_get_scanoutpos,/* called unconditionally */
3611 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3612 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3615 #if defined(CONFIG_DEBUG_KERNEL_DC)
3617 static ssize_t s3_debug_store(struct device *device,
3618 struct device_attribute *attr,
3624 struct drm_device *drm_dev = dev_get_drvdata(device);
3625 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3627 ret = kstrtoint(buf, 0, &s3_state);
3632 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3637 return ret == 0 ? count : 0;
3640 DEVICE_ATTR_WO(s3_debug);
3644 static int dm_early_init(void *handle)
3646 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3648 switch (adev->asic_type) {
3649 #if defined(CONFIG_DRM_AMD_DC_SI)
3653 adev->mode_info.num_crtc = 6;
3654 adev->mode_info.num_hpd = 6;
3655 adev->mode_info.num_dig = 6;
3658 adev->mode_info.num_crtc = 2;
3659 adev->mode_info.num_hpd = 2;
3660 adev->mode_info.num_dig = 2;
3665 adev->mode_info.num_crtc = 6;
3666 adev->mode_info.num_hpd = 6;
3667 adev->mode_info.num_dig = 6;
3670 adev->mode_info.num_crtc = 4;
3671 adev->mode_info.num_hpd = 6;
3672 adev->mode_info.num_dig = 7;
3676 adev->mode_info.num_crtc = 2;
3677 adev->mode_info.num_hpd = 6;
3678 adev->mode_info.num_dig = 6;
3682 adev->mode_info.num_crtc = 6;
3683 adev->mode_info.num_hpd = 6;
3684 adev->mode_info.num_dig = 7;
3687 adev->mode_info.num_crtc = 3;
3688 adev->mode_info.num_hpd = 6;
3689 adev->mode_info.num_dig = 9;
3692 adev->mode_info.num_crtc = 2;
3693 adev->mode_info.num_hpd = 6;
3694 adev->mode_info.num_dig = 9;
3696 case CHIP_POLARIS11:
3697 case CHIP_POLARIS12:
3698 adev->mode_info.num_crtc = 5;
3699 adev->mode_info.num_hpd = 5;
3700 adev->mode_info.num_dig = 5;
3702 case CHIP_POLARIS10:
3704 adev->mode_info.num_crtc = 6;
3705 adev->mode_info.num_hpd = 6;
3706 adev->mode_info.num_dig = 6;
3711 adev->mode_info.num_crtc = 6;
3712 adev->mode_info.num_hpd = 6;
3713 adev->mode_info.num_dig = 6;
3715 #if defined(CONFIG_DRM_AMD_DC_DCN)
3719 adev->mode_info.num_crtc = 4;
3720 adev->mode_info.num_hpd = 4;
3721 adev->mode_info.num_dig = 4;
3725 case CHIP_SIENNA_CICHLID:
3726 case CHIP_NAVY_FLOUNDER:
3727 adev->mode_info.num_crtc = 6;
3728 adev->mode_info.num_hpd = 6;
3729 adev->mode_info.num_dig = 6;
3732 case CHIP_DIMGREY_CAVEFISH:
3733 adev->mode_info.num_crtc = 5;
3734 adev->mode_info.num_hpd = 5;
3735 adev->mode_info.num_dig = 5;
3739 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3743 amdgpu_dm_set_irq_funcs(adev);
3745 if (adev->mode_info.funcs == NULL)
3746 adev->mode_info.funcs = &dm_display_funcs;
3749 * Note: Do NOT change adev->audio_endpt_rreg and
3750 * adev->audio_endpt_wreg because they are initialised in
3751 * amdgpu_device_init()
3753 #if defined(CONFIG_DEBUG_KERNEL_DC)
3755 adev_to_drm(adev)->dev,
3756 &dev_attr_s3_debug);
3762 static bool modeset_required(struct drm_crtc_state *crtc_state,
3763 struct dc_stream_state *new_stream,
3764 struct dc_stream_state *old_stream)
3766 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3769 static bool modereset_required(struct drm_crtc_state *crtc_state)
3771 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3774 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3776 drm_encoder_cleanup(encoder);
3780 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3781 .destroy = amdgpu_dm_encoder_destroy,
3785 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3786 struct drm_framebuffer *fb,
3787 int *min_downscale, int *max_upscale)
3789 struct amdgpu_device *adev = drm_to_adev(dev);
3790 struct dc *dc = adev->dm.dc;
3791 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3792 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3794 switch (fb->format->format) {
3795 case DRM_FORMAT_P010:
3796 case DRM_FORMAT_NV12:
3797 case DRM_FORMAT_NV21:
3798 *max_upscale = plane_cap->max_upscale_factor.nv12;
3799 *min_downscale = plane_cap->max_downscale_factor.nv12;
3802 case DRM_FORMAT_XRGB16161616F:
3803 case DRM_FORMAT_ARGB16161616F:
3804 case DRM_FORMAT_XBGR16161616F:
3805 case DRM_FORMAT_ABGR16161616F:
3806 *max_upscale = plane_cap->max_upscale_factor.fp16;
3807 *min_downscale = plane_cap->max_downscale_factor.fp16;
3811 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3812 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3817 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3818 * scaling factor of 1.0 == 1000 units.
3820 if (*max_upscale == 1)
3821 *max_upscale = 1000;
3823 if (*min_downscale == 1)
3824 *min_downscale = 1000;
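/*
 * The returned limits are in units of 1/1000 of the source size, matching
 * the scale_w/scale_h computation in fill_dc_scaling_info() below: e.g.
 * max_upscale = 16000 allows up to 16x upscaling and min_downscale = 250
 * allows shrinking down to 1/4 of the source.
 */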
3828 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3829 struct dc_scaling_info *scaling_info)
3831 int scale_w, scale_h, min_downscale, max_upscale;
3833 memset(scaling_info, 0, sizeof(*scaling_info));
3835 /* Source is fixed 16.16 but we ignore mantissa for now... */
3836 scaling_info->src_rect.x = state->src_x >> 16;
3837 scaling_info->src_rect.y = state->src_y >> 16;
3839 scaling_info->src_rect.width = state->src_w >> 16;
3840 if (scaling_info->src_rect.width == 0)
3843 scaling_info->src_rect.height = state->src_h >> 16;
3844 if (scaling_info->src_rect.height == 0)
3847 scaling_info->dst_rect.x = state->crtc_x;
3848 scaling_info->dst_rect.y = state->crtc_y;
3850 if (state->crtc_w == 0)
3853 scaling_info->dst_rect.width = state->crtc_w;
3855 if (state->crtc_h == 0)
3858 scaling_info->dst_rect.height = state->crtc_h;
3860 /* DRM doesn't specify clipping on destination output. */
3861 scaling_info->clip_rect = scaling_info->dst_rect;
3863 /* Validate scaling per-format with DC plane caps */
3864 if (state->plane && state->plane->dev && state->fb) {
3865 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3866 &min_downscale, &max_upscale);
3868 min_downscale = 250;
3869 max_upscale = 16000;
3872 scale_w = scaling_info->dst_rect.width * 1000 /
3873 scaling_info->src_rect.width;
3875 if (scale_w < min_downscale || scale_w > max_upscale)
3878 scale_h = scaling_info->dst_rect.height * 1000 /
3879 scaling_info->src_rect.height;
3881 if (scale_h < min_downscale || scale_h > max_upscale)
3885 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3886 * assume reasonable defaults based on the format.
3893 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3894 uint64_t tiling_flags)
3896 /* Fill GFX8 params */
3897 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3898 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3900 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3901 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3902 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3903 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3904 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3906 /* XXX fix me for VI */
3907 tiling_info->gfx8.num_banks = num_banks;
3908 tiling_info->gfx8.array_mode =
3909 DC_ARRAY_2D_TILED_THIN1;
3910 tiling_info->gfx8.tile_split = tile_split;
3911 tiling_info->gfx8.bank_width = bankw;
3912 tiling_info->gfx8.bank_height = bankh;
3913 tiling_info->gfx8.tile_aspect = mtaspect;
3914 tiling_info->gfx8.tile_mode =
3915 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3916 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3917 == DC_ARRAY_1D_TILED_THIN1) {
3918 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3921 tiling_info->gfx8.pipe_config =
3922 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3926 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3927 union dc_tiling_info *tiling_info)
3929 tiling_info->gfx9.num_pipes =
3930 adev->gfx.config.gb_addr_config_fields.num_pipes;
3931 tiling_info->gfx9.num_banks =
3932 adev->gfx.config.gb_addr_config_fields.num_banks;
3933 tiling_info->gfx9.pipe_interleave =
3934 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3935 tiling_info->gfx9.num_shader_engines =
3936 adev->gfx.config.gb_addr_config_fields.num_se;
3937 tiling_info->gfx9.max_compressed_frags =
3938 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3939 tiling_info->gfx9.num_rb_per_se =
3940 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3941 tiling_info->gfx9.shaderEnable = 1;
3942 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3943 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3944 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3945 adev->asic_type == CHIP_VANGOGH)
3946 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3950 validate_dcc(struct amdgpu_device *adev,
3951 const enum surface_pixel_format format,
3952 const enum dc_rotation_angle rotation,
3953 const union dc_tiling_info *tiling_info,
3954 const struct dc_plane_dcc_param *dcc,
3955 const struct dc_plane_address *address,
3956 const struct plane_size *plane_size)
3958 struct dc *dc = adev->dm.dc;
3959 struct dc_dcc_surface_param input;
3960 struct dc_surface_dcc_cap output;
3962 memset(&input, 0, sizeof(input));
3963 memset(&output, 0, sizeof(output));
3968 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3969 !dc->cap_funcs.get_dcc_compression_cap)
3972 input.format = format;
3973 input.surface_size.width = plane_size->surface_size.width;
3974 input.surface_size.height = plane_size->surface_size.height;
3975 input.swizzle_mode = tiling_info->gfx9.swizzle;
3977 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3978 input.scan = SCAN_DIRECTION_HORIZONTAL;
3979 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3980 input.scan = SCAN_DIRECTION_VERTICAL;
3982 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3985 if (!output.capable)
3988 if (dcc->independent_64b_blks == 0 &&
3989 output.grph.rgb.independent_64b_blks != 0)
3996 modifier_has_dcc(uint64_t modifier)
3998 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4002 modifier_gfx9_swizzle_mode(uint64_t modifier)
4004 if (modifier == DRM_FORMAT_MOD_LINEAR)
4007 return AMD_FMT_MOD_GET(TILE, modifier);
4010 static const struct drm_format_info *
4011 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4013 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4017 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4018 union dc_tiling_info *tiling_info,
4021 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4022 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4023 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4024 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4026 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4028 if (!IS_AMD_FMT_MOD(modifier))
4031 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4032 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4034 if (adev->family >= AMDGPU_FAMILY_NV) {
4035 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4037 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4039 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4043 enum dm_micro_swizzle {
4044 MICRO_SWIZZLE_Z = 0,
4045 MICRO_SWIZZLE_S = 1,
4046 MICRO_SWIZZLE_D = 2,
4050 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4054 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4055 const struct drm_format_info *info = drm_format_info(format);
4057 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
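/*
 * The low two bits of the GFX9+ swizzle mode select the micro-tile ordering,
 * e.g. AMD_FMT_MOD_TILE_GFX9_64K_D_X (value 26 in drm_fourcc.h) gives
 * 26 & 3 == MICRO_SWIZZLE_D here.
 */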
4063 * We always have to allow this modifier, because core DRM still
4064 * checks LINEAR support if userspace does not provide modifiers.
4066 if (modifier == DRM_FORMAT_MOD_LINEAR)
4070 * The arbitrary tiling support for multiplane formats has not been hooked up.
4073 if (info->num_planes > 1)
4077 * For D swizzle the canonical modifier depends on the bpp, so check it here.
4080 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4081 adev->family >= AMDGPU_FAMILY_NV) {
4082 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4086 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4090 if (modifier_has_dcc(modifier)) {
4091 /* Per radeonsi comments 16/64 bpp are more complicated. */
4092 if (info->cpp[0] != 4)
4100 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4105 if (*cap - *size < 1) {
4106 uint64_t new_cap = *cap * 2;
4107 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4115 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4121 (*mods)[*size] = mod;
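/*
 * Appending ends by bumping the element count (a minimal sketch of the
 * remaining step); since the capacity doubles when exhausted, appending N
 * modifiers costs O(N) copies in total.
 */
*size += 1;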
4126 add_gfx9_modifiers(const struct amdgpu_device *adev,
4127 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4129 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4130 int pipe_xor_bits = min(8, pipes +
4131 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4132 int bank_xor_bits = min(8 - pipe_xor_bits,
4133 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4134 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4135 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
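/*
 * Worked example (hypothetical Raven-class config): 4 pipes, 1 SE and
 * 8 banks give pipes = 2, pipe_xor_bits = min(8, 2 + 0) = 2 and
 * bank_xor_bits = min(8 - 2, 3) = 3, the values packed into the
 * PIPE_XOR_BITS/BANK_XOR_BITS modifier fields below.
 */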
4138 if (adev->family == AMDGPU_FAMILY_RV) {
4139 /* Raven2 and later */
4140 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4143 * No _D DCC swizzles yet because we only allow 32bpp, which
4144 * doesn't support _D on DCN
4147 if (has_constant_encode) {
4148 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4149 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4150 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4151 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4152 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4153 AMD_FMT_MOD_SET(DCC, 1) |
4154 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4155 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4156 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4159 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4160 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4161 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4162 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4163 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4164 AMD_FMT_MOD_SET(DCC, 1) |
4165 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4166 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4167 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4169 if (has_constant_encode) {
4170 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4171 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4172 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4173 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4174 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4175 AMD_FMT_MOD_SET(DCC, 1) |
4176 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4177 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4178 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4180 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4181 AMD_FMT_MOD_SET(RB, rb) |
4182 AMD_FMT_MOD_SET(PIPE, pipes));
4185 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4186 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4187 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4188 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4189 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4190 AMD_FMT_MOD_SET(DCC, 1) |
4191 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4192 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4193 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4194 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4195 AMD_FMT_MOD_SET(RB, rb) |
4196 AMD_FMT_MOD_SET(PIPE, pipes));
4200 * Only supported for 64bpp on Raven, will be filtered on format in
4201 * dm_plane_format_mod_supported.
4203 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4204 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4205 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4206 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4207 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4209 if (adev->family == AMDGPU_FAMILY_RV) {
4210 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4211 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4212 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4213 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4214 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4218 * Only supported for 64bpp on Raven, will be filtered on format in
4219 * dm_plane_format_mod_supported.
4221 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4222 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4223 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4225 if (adev->family == AMDGPU_FAMILY_RV) {
4226 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4228 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
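/*
 * GFX10.0/10.1 (Navi1x) modifiers: 64K R_X/S_X swizzles with optional DCC.
 * Unlike GFX9 there are no bank xor bits; only PIPE_XOR_BITS varies with the
 * ASIC's pipe configuration.
 */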
4233 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4234 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4236 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4238 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4239 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4240 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4241 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4242 AMD_FMT_MOD_SET(DCC, 1) |
4243 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4244 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4245 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4247 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4248 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4249 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4250 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4251 AMD_FMT_MOD_SET(DCC, 1) |
4252 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4253 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4254 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4255 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4257 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4258 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4259 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4260 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4262 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4263 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4264 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4265 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4268 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4269 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4270 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4271 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4273 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4274 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4275 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
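/*
 * GFX10.3 (Sienna Cichlid and later) modifiers: the same 64K R_X/S_X swizzles
 * but with the RB+ tile version, a PACKERS field derived from num_pkrs, and
 * DCC that is independent-64B and independent-128B with 128B max compressed
 * blocks.
 */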
4279 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4280 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4282 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4283 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4285 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4286 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4287 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4288 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4289 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4290 AMD_FMT_MOD_SET(DCC, 1) |
4291 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4292 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4293 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4294 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4296 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4297 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4298 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4299 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4300 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4301 AMD_FMT_MOD_SET(DCC, 1) |
4302 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4303 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4304 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4305 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4306 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4308 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4309 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4310 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4311 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4312 AMD_FMT_MOD_SET(PACKERS, pkrs));
4314 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4315 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4316 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4317 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4318 AMD_FMT_MOD_SET(PACKERS, pkrs));
4320 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4321 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4322 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4323 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4325 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4326 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4327 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
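/*
 * Build the modifier list advertised for a plane. Cursor planes only get
 * LINEAR; other planes get the per-family GFX9/GFX10 lists above. The list
 * always ends with LINEAR followed by the DRM_FORMAT_MOD_INVALID terminator.
 */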
4331 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4333 uint64_t size = 0, capacity = 128;
4336 /* We have not hooked up any pre-GFX9 modifiers. */
4337 if (adev->family < AMDGPU_FAMILY_AI)
4340 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4342 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4343 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4344 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4345 return *mods ? 0 : -ENOMEM;
4348 switch (adev->family) {
4349 case AMDGPU_FAMILY_AI:
4350 case AMDGPU_FAMILY_RV:
4351 add_gfx9_modifiers(adev, mods, &size, &capacity);
4353 case AMDGPU_FAMILY_NV:
4354 case AMDGPU_FAMILY_VGH:
4355 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4356 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4358 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4362 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4364 /* INVALID marks the end of the list. */
4365 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
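/*
 * Translate a framebuffer's format modifier into DC tiling info and, when the
 * modifier carries DCC, fill in the DCC parameters and metadata address.
 */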
4374 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4375 const struct amdgpu_framebuffer *afb,
4376 const enum surface_pixel_format format,
4377 const enum dc_rotation_angle rotation,
4378 const struct plane_size *plane_size,
4379 union dc_tiling_info *tiling_info,
4380 struct dc_plane_dcc_param *dcc,
4381 struct dc_plane_address *address,
4382 const bool force_disable_dcc)
4384 const uint64_t modifier = afb->base.modifier;
4387 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4388 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4390 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4391 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4394 dcc->meta_pitch = afb->base.pitches[1];
4395 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4397 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4398 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4401 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
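/*
 * Fill plane size, tiling, DCC and address information for a framebuffer.
 * RGB surfaces use a single graphics address; planar YUV surfaces get
 * separate luma/chroma addresses and pitches.
 */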
4409 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4410 const struct amdgpu_framebuffer *afb,
4411 const enum surface_pixel_format format,
4412 const enum dc_rotation_angle rotation,
4413 const uint64_t tiling_flags,
4414 union dc_tiling_info *tiling_info,
4415 struct plane_size *plane_size,
4416 struct dc_plane_dcc_param *dcc,
4417 struct dc_plane_address *address,
4419 bool force_disable_dcc)
4421 const struct drm_framebuffer *fb = &afb->base;
4424 memset(tiling_info, 0, sizeof(*tiling_info));
4425 memset(plane_size, 0, sizeof(*plane_size));
4426 memset(dcc, 0, sizeof(*dcc));
4427 memset(address, 0, sizeof(*address));
4429 address->tmz_surface = tmz_surface;
4431 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4432 uint64_t addr = afb->address + fb->offsets[0];
4434 plane_size->surface_size.x = 0;
4435 plane_size->surface_size.y = 0;
4436 plane_size->surface_size.width = fb->width;
4437 plane_size->surface_size.height = fb->height;
4438 plane_size->surface_pitch =
4439 fb->pitches[0] / fb->format->cpp[0];
4441 address->type = PLN_ADDR_TYPE_GRAPHICS;
4442 address->grph.addr.low_part = lower_32_bits(addr);
4443 address->grph.addr.high_part = upper_32_bits(addr);
4444 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4445 uint64_t luma_addr = afb->address + fb->offsets[0];
4446 uint64_t chroma_addr = afb->address + fb->offsets[1];
4448 plane_size->surface_size.x = 0;
4449 plane_size->surface_size.y = 0;
4450 plane_size->surface_size.width = fb->width;
4451 plane_size->surface_size.height = fb->height;
4452 plane_size->surface_pitch =
4453 fb->pitches[0] / fb->format->cpp[0];
4455 plane_size->chroma_size.x = 0;
4456 plane_size->chroma_size.y = 0;
4457 /* TODO: set these based on surface format */
4458 plane_size->chroma_size.width = fb->width / 2;
4459 plane_size->chroma_size.height = fb->height / 2;
4461 plane_size->chroma_pitch =
4462 fb->pitches[1] / fb->format->cpp[1];
4464 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4465 address->video_progressive.luma_addr.low_part =
4466 lower_32_bits(luma_addr);
4467 address->video_progressive.luma_addr.high_part =
4468 upper_32_bits(luma_addr);
4469 address->video_progressive.chroma_addr.low_part =
4470 lower_32_bits(chroma_addr);
4471 address->video_progressive.chroma_addr.high_part =
4472 upper_32_bits(chroma_addr);
4475 if (adev->family >= AMDGPU_FAMILY_AI) {
4476 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4477 rotation, plane_size,
4484 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
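/*
 * Derive blending settings from the DRM plane state: per-pixel alpha is only
 * honoured for pre-multiplied overlay planes with an alpha-capable format,
 * and a plane alpha below 0xffff enables global alpha.
 */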
4491 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4492 bool *per_pixel_alpha, bool *global_alpha,
4493 int *global_alpha_value)
4495 *per_pixel_alpha = false;
4496 *global_alpha = false;
4497 *global_alpha_value = 0xff;
4499 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4502 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4503 static const uint32_t alpha_formats[] = {
4504 DRM_FORMAT_ARGB8888,
4505 DRM_FORMAT_RGBA8888,
4506 DRM_FORMAT_ABGR8888,
4508 uint32_t format = plane_state->fb->format->format;
4511 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4512 if (format == alpha_formats[i]) {
4513 *per_pixel_alpha = true;
4519 if (plane_state->alpha < 0xffff) {
4520 *global_alpha = true;
4521 *global_alpha_value = plane_state->alpha >> 8;
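/*
 * Map the DRM color encoding/range properties to a DC color space. RGB
 * formats always use sRGB; YCbCr formats select BT.601/709/2020 in full or
 * limited range.
 */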
4526 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4527 const enum surface_pixel_format format,
4528 enum dc_color_space *color_space)
4532 *color_space = COLOR_SPACE_SRGB;
4534 /* DRM color properties only affect non-RGB formats. */
4535 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4538 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4540 switch (plane_state->color_encoding) {
4541 case DRM_COLOR_YCBCR_BT601:
4543 *color_space = COLOR_SPACE_YCBCR601;
4545 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4548 case DRM_COLOR_YCBCR_BT709:
4550 *color_space = COLOR_SPACE_YCBCR709;
4552 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4555 case DRM_COLOR_YCBCR_BT2020:
4557 *color_space = COLOR_SPACE_2020_YCBCR;
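/*
 * Populate a dc_plane_info (format, rotation, tiling, DCC, blending, color
 * space) and the plane address from a DRM plane state.
 */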
4570 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4571 const struct drm_plane_state *plane_state,
4572 const uint64_t tiling_flags,
4573 struct dc_plane_info *plane_info,
4574 struct dc_plane_address *address,
4576 bool force_disable_dcc)
4578 const struct drm_framebuffer *fb = plane_state->fb;
4579 const struct amdgpu_framebuffer *afb =
4580 to_amdgpu_framebuffer(plane_state->fb);
4581 struct drm_format_name_buf format_name;
4584 memset(plane_info, 0, sizeof(*plane_info));
4586 switch (fb->format->format) {
4588 plane_info->format =
4589 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4591 case DRM_FORMAT_RGB565:
4592 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4594 case DRM_FORMAT_XRGB8888:
4595 case DRM_FORMAT_ARGB8888:
4596 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4598 case DRM_FORMAT_XRGB2101010:
4599 case DRM_FORMAT_ARGB2101010:
4600 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4602 case DRM_FORMAT_XBGR2101010:
4603 case DRM_FORMAT_ABGR2101010:
4604 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4606 case DRM_FORMAT_XBGR8888:
4607 case DRM_FORMAT_ABGR8888:
4608 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4610 case DRM_FORMAT_NV21:
4611 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4613 case DRM_FORMAT_NV12:
4614 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4616 case DRM_FORMAT_P010:
4617 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4619 case DRM_FORMAT_XRGB16161616F:
4620 case DRM_FORMAT_ARGB16161616F:
4621 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4623 case DRM_FORMAT_XBGR16161616F:
4624 case DRM_FORMAT_ABGR16161616F:
4625 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4629 "Unsupported screen format %s\n",
4630 drm_get_format_name(fb->format->format, &format_name));
4634 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4635 case DRM_MODE_ROTATE_0:
4636 plane_info->rotation = ROTATION_ANGLE_0;
4638 case DRM_MODE_ROTATE_90:
4639 plane_info->rotation = ROTATION_ANGLE_90;
4641 case DRM_MODE_ROTATE_180:
4642 plane_info->rotation = ROTATION_ANGLE_180;
4644 case DRM_MODE_ROTATE_270:
4645 plane_info->rotation = ROTATION_ANGLE_270;
4648 plane_info->rotation = ROTATION_ANGLE_0;
4652 plane_info->visible = true;
4653 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4655 plane_info->layer_index = 0;
4657 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4658 &plane_info->color_space);
4662 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4663 plane_info->rotation, tiling_flags,
4664 &plane_info->tiling_info,
4665 &plane_info->plane_size,
4666 &plane_info->dcc, address, tmz_surface,
4671 fill_blending_from_plane_state(
4672 plane_state, &plane_info->per_pixel_alpha,
4673 &plane_info->global_alpha, &plane_info->global_alpha_value);
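/*
 * Build a full dc_plane_state from the DRM plane/CRTC state: scaling rects,
 * buffer/format attributes and color management. DCC is force-disabled on
 * Raven while suspending.
 */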
4678 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4679 struct dc_plane_state *dc_plane_state,
4680 struct drm_plane_state *plane_state,
4681 struct drm_crtc_state *crtc_state)
4683 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4684 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4685 struct dc_scaling_info scaling_info;
4686 struct dc_plane_info plane_info;
4688 bool force_disable_dcc = false;
4690 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4694 dc_plane_state->src_rect = scaling_info.src_rect;
4695 dc_plane_state->dst_rect = scaling_info.dst_rect;
4696 dc_plane_state->clip_rect = scaling_info.clip_rect;
4697 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4699 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4700 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4703 &dc_plane_state->address,
4709 dc_plane_state->format = plane_info.format;
4710 dc_plane_state->color_space = plane_info.color_space;
4712 dc_plane_state->plane_size = plane_info.plane_size;
4713 dc_plane_state->rotation = plane_info.rotation;
4714 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4715 dc_plane_state->stereo_format = plane_info.stereo_format;
4716 dc_plane_state->tiling_info = plane_info.tiling_info;
4717 dc_plane_state->visible = plane_info.visible;
4718 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4719 dc_plane_state->global_alpha = plane_info.global_alpha;
4720 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4721 dc_plane_state->dcc = plane_info.dcc;
4722 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4725 * Always set input transfer function, since plane state is refreshed
4728 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
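/*
 * Compute the stream's source (viewport) and destination rectangles from the
 * mode, the connector's scaling mode and any underscan borders.
 */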
4735 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4736 const struct dm_connector_state *dm_state,
4737 struct dc_stream_state *stream)
4739 enum amdgpu_rmx_type rmx_type;
4741 struct rect src = { 0 }; /* viewport in composition space */
4742 struct rect dst = { 0 }; /* stream addressable area */
4744 /* no mode. nothing to be done */
4748 /* Full screen scaling by default */
4749 src.width = mode->hdisplay;
4750 src.height = mode->vdisplay;
4751 dst.width = stream->timing.h_addressable;
4752 dst.height = stream->timing.v_addressable;
4755 rmx_type = dm_state->scaling;
4756 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4757 if (src.width * dst.height <
4758 src.height * dst.width) {
4759 /* height needs less upscaling/more downscaling */
4760 dst.width = src.width *
4761 dst.height / src.height;
4763 /* width needs less upscaling/more downscaling */
4764 dst.height = src.height *
4765 dst.width / src.width;
4767 } else if (rmx_type == RMX_CENTER) {
4771 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4772 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4774 if (dm_state->underscan_enable) {
4775 dst.x += dm_state->underscan_hborder / 2;
4776 dst.y += dm_state->underscan_vborder / 2;
4777 dst.width -= dm_state->underscan_hborder;
4778 dst.height -= dm_state->underscan_vborder;
4785 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4786 dst.x, dst.y, dst.width, dst.height);
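/*
 * Pick a DC color depth from the connector's EDID-reported bpc, capped by the
 * HDMI YCbCr 4:2:0 deep-color capabilities and by the user's requested max
 * bpc, rounded down to an even value.
 */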
4790 static enum dc_color_depth
4791 convert_color_depth_from_display_info(const struct drm_connector *connector,
4792 bool is_y420, int requested_bpc)
4799 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4800 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4802 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4804 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4807 bpc = (uint8_t)connector->display_info.bpc;
4808 /* Assume 8 bpc by default if no bpc is specified. */
4809 bpc = bpc ? bpc : 8;
4812 if (requested_bpc > 0) {
4814 * Cap display bpc based on the user requested value.
4816 * The value for state->max_bpc may not be correctly updated
4817 * depending on when the connector gets added to the state
4818 * or if this was called outside of atomic check, so it
4819 * can't be used directly.
4821 bpc = min_t(u8, bpc, requested_bpc);
4823 /* Round down to the nearest even number. */
4824 bpc = bpc - (bpc & 1);
4830 * Temporary workaround: DRM doesn't parse color depth for
4831 * EDID revisions before 1.4.
4832 * TODO: Fix edid parsing
4834 return COLOR_DEPTH_888;
4836 return COLOR_DEPTH_666;
4838 return COLOR_DEPTH_888;
4840 return COLOR_DEPTH_101010;
4842 return COLOR_DEPTH_121212;
4844 return COLOR_DEPTH_141414;
4846 return COLOR_DEPTH_161616;
4848 return COLOR_DEPTH_UNDEFINED;
4852 static enum dc_aspect_ratio
4853 get_aspect_ratio(const struct drm_display_mode *mode_in)
4855 /* 1-1 mapping, since both enums follow the HDMI spec. */
4856 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4859 static enum dc_color_space
4860 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4862 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4864 switch (dc_crtc_timing->pixel_encoding) {
4865 case PIXEL_ENCODING_YCBCR422:
4866 case PIXEL_ENCODING_YCBCR444:
4867 case PIXEL_ENCODING_YCBCR420:
4870 * 27030 kHz (27.03 MHz) is the separation point between HDTV and SDTV.
4871 * Per the HDMI spec, we use YCbCr709 above it and YCbCr601 below it
4874 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4875 if (dc_crtc_timing->flags.Y_ONLY)
4877 COLOR_SPACE_YCBCR709_LIMITED;
4879 color_space = COLOR_SPACE_YCBCR709;
4881 if (dc_crtc_timing->flags.Y_ONLY)
4883 COLOR_SPACE_YCBCR601_LIMITED;
4885 color_space = COLOR_SPACE_YCBCR601;
4890 case PIXEL_ENCODING_RGB:
4891 color_space = COLOR_SPACE_SRGB;
4902 static bool adjust_colour_depth_from_display_info(
4903 struct dc_crtc_timing *timing_out,
4904 const struct drm_display_info *info)
4906 enum dc_color_depth depth = timing_out->display_color_depth;
4909 normalized_clk = timing_out->pix_clk_100hz / 10;
4910 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4911 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4912 normalized_clk /= 2;
4913 /* Adjust the pixel clock per the HDMI spec, based on colour depth */
4915 case COLOR_DEPTH_888:
4917 case COLOR_DEPTH_101010:
4918 normalized_clk = (normalized_clk * 30) / 24;
4920 case COLOR_DEPTH_121212:
4921 normalized_clk = (normalized_clk * 36) / 24;
4923 case COLOR_DEPTH_161616:
4924 normalized_clk = (normalized_clk * 48) / 24;
4927 /* The above depths are the only ones valid for HDMI. */
4930 if (normalized_clk <= info->max_tmds_clock) {
4931 timing_out->display_color_depth = depth;
4934 } while (--depth > COLOR_DEPTH_666);
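/*
 * Fill the stream timing (pixel encoding, color depth, VIC, sync polarity,
 * porches and pixel clock) from the DRM display mode, optionally reusing the
 * VIC and polarities of an old stream when only scaling changed.
 */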
4938 static void fill_stream_properties_from_drm_display_mode(
4939 struct dc_stream_state *stream,
4940 const struct drm_display_mode *mode_in,
4941 const struct drm_connector *connector,
4942 const struct drm_connector_state *connector_state,
4943 const struct dc_stream_state *old_stream,
4946 struct dc_crtc_timing *timing_out = &stream->timing;
4947 const struct drm_display_info *info = &connector->display_info;
4948 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4949 struct hdmi_vendor_infoframe hv_frame;
4950 struct hdmi_avi_infoframe avi_frame;
4952 memset(&hv_frame, 0, sizeof(hv_frame));
4953 memset(&avi_frame, 0, sizeof(avi_frame));
4955 timing_out->h_border_left = 0;
4956 timing_out->h_border_right = 0;
4957 timing_out->v_border_top = 0;
4958 timing_out->v_border_bottom = 0;
4959 /* TODO: un-hardcode */
4960 if (drm_mode_is_420_only(info, mode_in)
4961 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4962 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4963 else if (drm_mode_is_420_also(info, mode_in)
4964 && aconnector->force_yuv420_output)
4965 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4966 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4967 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4968 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4970 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4972 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4973 timing_out->display_color_depth = convert_color_depth_from_display_info(
4975 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4977 timing_out->scan_type = SCANNING_TYPE_NODATA;
4978 timing_out->hdmi_vic = 0;
4981 timing_out->vic = old_stream->timing.vic;
4982 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4983 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4985 timing_out->vic = drm_match_cea_mode(mode_in);
4986 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4987 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4988 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4989 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4992 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4993 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4994 timing_out->vic = avi_frame.video_code;
4995 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4996 timing_out->hdmi_vic = hv_frame.vic;
4999 timing_out->h_addressable = mode_in->crtc_hdisplay;
5000 timing_out->h_total = mode_in->crtc_htotal;
5001 timing_out->h_sync_width =
5002 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5003 timing_out->h_front_porch =
5004 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5005 timing_out->v_total = mode_in->crtc_vtotal;
5006 timing_out->v_addressable = mode_in->crtc_vdisplay;
5007 timing_out->v_front_porch =
5008 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5009 timing_out->v_sync_width =
5010 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5011 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5012 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5014 stream->output_color_space = get_output_color_space(timing_out);
5016 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5017 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5018 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5019 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5020 drm_mode_is_420_also(info, mode_in) &&
5021 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5022 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5023 adjust_colour_depth_from_display_info(timing_out, info);
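/*
 * Copy EDID audio capabilities (modes, speaker allocation, latencies) from
 * the DC sink into the stream's audio_info.
 */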
5028 static void fill_audio_info(struct audio_info *audio_info,
5029 const struct drm_connector *drm_connector,
5030 const struct dc_sink *dc_sink)
5033 int cea_revision = 0;
5034 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5036 audio_info->manufacture_id = edid_caps->manufacturer_id;
5037 audio_info->product_id = edid_caps->product_id;
5039 cea_revision = drm_connector->display_info.cea_rev;
5041 strscpy(audio_info->display_name,
5042 edid_caps->display_name,
5043 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5045 if (cea_revision >= 3) {
5046 audio_info->mode_count = edid_caps->audio_mode_count;
5048 for (i = 0; i < audio_info->mode_count; ++i) {
5049 audio_info->modes[i].format_code =
5050 (enum audio_format_code)
5051 (edid_caps->audio_modes[i].format_code);
5052 audio_info->modes[i].channel_count =
5053 edid_caps->audio_modes[i].channel_count;
5054 audio_info->modes[i].sample_rates.all =
5055 edid_caps->audio_modes[i].sample_rate;
5056 audio_info->modes[i].sample_size =
5057 edid_caps->audio_modes[i].sample_size;
5061 audio_info->flags.all = edid_caps->speaker_flags;
5063 /* TODO: We only check progressive mode; check interlaced mode too */
5064 if (drm_connector->latency_present[0]) {
5065 audio_info->video_latency = drm_connector->video_latency[0];
5066 audio_info->audio_latency = drm_connector->audio_latency[0];
5069 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5074 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5075 struct drm_display_mode *dst_mode)
5077 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5078 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5079 dst_mode->crtc_clock = src_mode->crtc_clock;
5080 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5081 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5082 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5083 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5084 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5085 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5086 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5087 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5088 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5089 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5090 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5094 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5095 const struct drm_display_mode *native_mode,
5098 if (scale_enabled) {
5099 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5100 } else if (native_mode->clock == drm_mode->clock &&
5101 native_mode->htotal == drm_mode->htotal &&
5102 native_mode->vtotal == drm_mode->vtotal) {
5103 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5105 /* no scaling and not an amdgpu-inserted mode, no need to patch */
5109 static struct dc_sink *
5110 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5112 struct dc_sink_init_data sink_init_data = { 0 };
5113 struct dc_sink *sink = NULL;
5114 sink_init_data.link = aconnector->dc_link;
5115 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5117 sink = dc_sink_create(&sink_init_data);
5119 DRM_ERROR("Failed to create sink!\n");
5122 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5127 static void set_multisync_trigger_params(
5128 struct dc_stream_state *stream)
5130 if (stream->triggered_crtc_reset.enabled) {
5131 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5132 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5136 static void set_master_stream(struct dc_stream_state *stream_set[],
5139 int j, highest_rfr = 0, master_stream = 0;
5141 for (j = 0; j < stream_count; j++) {
5142 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5143 int refresh_rate = 0;
5145 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5146 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5147 if (refresh_rate > highest_rfr) {
5148 highest_rfr = refresh_rate;
5153 for (j = 0; j < stream_count; j++) {
5155 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5159 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5163 if (context->stream_count < 2)
5165 for (i = 0; i < context->stream_count ; i++) {
5166 if (!context->streams[i])
5169 * TODO: add a function to read AMD VSDB bits and set
5170 * crtc_sync_master.multi_sync_enabled flag
5171 * For now it's set to false
5173 set_multisync_trigger_params(context->streams[i]);
5175 set_master_stream(context->streams, context->stream_count);
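/*
 * Create and fill a dc_stream_state for the given connector and mode: pick a
 * (possibly fake) sink, derive the timing from the preferred or requested
 * mode, enable DSC on DP links when supported, and build the scaling, audio
 * and PSR/VSC info packets.
 */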
5178 static struct dc_stream_state *
5179 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5180 const struct drm_display_mode *drm_mode,
5181 const struct dm_connector_state *dm_state,
5182 const struct dc_stream_state *old_stream,
5185 struct drm_display_mode *preferred_mode = NULL;
5186 struct drm_connector *drm_connector;
5187 const struct drm_connector_state *con_state =
5188 dm_state ? &dm_state->base : NULL;
5189 struct dc_stream_state *stream = NULL;
5190 struct drm_display_mode mode = *drm_mode;
5191 bool native_mode_found = false;
5192 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5194 int preferred_refresh = 0;
5195 #if defined(CONFIG_DRM_AMD_DC_DCN)
5196 struct dsc_dec_dpcd_caps dsc_caps;
5197 uint32_t link_bandwidth_kbps;
5199 struct dc_sink *sink = NULL;
5200 if (aconnector == NULL) {
5201 DRM_ERROR("aconnector is NULL!\n");
5205 drm_connector = &aconnector->base;
5207 if (!aconnector->dc_sink) {
5208 sink = create_fake_sink(aconnector);
5212 sink = aconnector->dc_sink;
5213 dc_sink_retain(sink);
5216 stream = dc_create_stream_for_sink(sink);
5218 if (stream == NULL) {
5219 DRM_ERROR("Failed to create stream for sink!\n");
5223 stream->dm_stream_context = aconnector;
5225 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5226 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5228 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5229 /* Search for preferred mode */
5230 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5231 native_mode_found = true;
5235 if (!native_mode_found)
5236 preferred_mode = list_first_entry_or_null(
5237 &aconnector->base.modes,
5238 struct drm_display_mode,
5241 mode_refresh = drm_mode_vrefresh(&mode);
5243 if (preferred_mode == NULL) {
5245 * This may not be an error; the use case is when we have no
5246 * usermode calls to reset and set mode upon hotplug. In this
5247 * case, we call set mode ourselves to restore the previous mode,
5248 * and the mode list may not have been filled in yet.
5250 DRM_DEBUG_DRIVER("No preferred mode found\n");
5252 decide_crtc_timing_for_drm_display_mode(
5253 &mode, preferred_mode,
5254 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5255 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5259 drm_mode_set_crtcinfo(&mode, 0);
5262 * If scaling is enabled and the refresh rate didn't change,
5263 * copy the vic and polarities of the old timings.
5265 if (!scale || mode_refresh != preferred_refresh)
5266 fill_stream_properties_from_drm_display_mode(stream,
5267 &mode, &aconnector->base, con_state, NULL, requested_bpc);
5269 fill_stream_properties_from_drm_display_mode(stream,
5270 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5272 stream->timing.flags.DSC = 0;
5274 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5275 #if defined(CONFIG_DRM_AMD_DC_DCN)
5276 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5277 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5278 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5280 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5281 dc_link_get_link_cap(aconnector->dc_link));
5283 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5284 /* Set DSC policy according to dsc_clock_en */
5285 dc_dsc_policy_set_enable_dsc_when_not_needed(
5286 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5288 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5290 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5292 link_bandwidth_kbps,
5294 &stream->timing.dsc_cfg))
5295 stream->timing.flags.DSC = 1;
5296 /* Overwrite the stream flag if DSC is enabled through debugfs */
5297 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5298 stream->timing.flags.DSC = 1;
5300 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5301 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5303 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5304 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5306 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5307 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5312 update_stream_scaling_settings(&mode, dm_state, stream);
5315 &stream->audio_info,
5319 update_stream_signal(stream, sink);
5321 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5322 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5324 if (stream->link->psr_settings.psr_feature_enabled) {
5326 // Decide whether the stream supports VSC SDP colorimetry
5327 // before building the VSC info packet.
5329 stream->use_vsc_sdp_for_colorimetry = false;
5330 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5331 stream->use_vsc_sdp_for_colorimetry =
5332 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5334 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5335 stream->use_vsc_sdp_for_colorimetry = true;
5337 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5340 dc_sink_release(sink);
5345 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5347 drm_crtc_cleanup(crtc);
5351 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5352 struct drm_crtc_state *state)
5354 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5356 /* TODO: rework dc_stream destruction once the stream object is flattened */
5358 dc_stream_release(cur->stream);
5361 __drm_atomic_helper_crtc_destroy_state(state);
5367 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5369 struct dm_crtc_state *state;
5372 dm_crtc_destroy_state(crtc, crtc->state);
5374 state = kzalloc(sizeof(*state), GFP_KERNEL);
5375 if (WARN_ON(!state))
5378 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5381 static struct drm_crtc_state *
5382 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5384 struct dm_crtc_state *state, *cur;
5386 cur = to_dm_crtc_state(crtc->state);
5388 if (WARN_ON(!crtc->state))
5391 state = kzalloc(sizeof(*state), GFP_KERNEL);
5395 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5398 state->stream = cur->stream;
5399 dc_stream_retain(state->stream);
5402 state->active_planes = cur->active_planes;
5403 state->vrr_infopacket = cur->vrr_infopacket;
5404 state->abm_level = cur->abm_level;
5405 state->vrr_supported = cur->vrr_supported;
5406 state->freesync_config = cur->freesync_config;
5407 state->crc_src = cur->crc_src;
5408 state->cm_has_degamma = cur->cm_has_degamma;
5409 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5411 /* TODO: duplicate the dc_stream once the stream object is flattened */
5413 return &state->base;
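/* Enable or disable the VUPDATE interrupt for this CRTC's OTG instance. */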
5416 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5418 enum dc_irq_source irq_source;
5419 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5420 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5423 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5425 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5427 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5428 acrtc->crtc_id, enable ? "en" : "dis", rc);
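/*
 * Toggle the VBLANK interrupt for the CRTC and keep the VUPDATE interrupt in
 * sync when VRR is active; on DCN, queue the vblank work item with the new
 * state.
 */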
5432 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5434 enum dc_irq_source irq_source;
5435 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5436 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5437 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5438 #if defined(CONFIG_DRM_AMD_DC_DCN)
5439 struct amdgpu_display_manager *dm = &adev->dm;
5440 unsigned long flags;
5445 /* vblank irq on -> Only need vupdate irq in vrr mode */
5446 if (amdgpu_dm_vrr_active(acrtc_state))
5447 rc = dm_set_vupdate_irq(crtc, true);
5449 /* vblank irq off -> vupdate irq off */
5450 rc = dm_set_vupdate_irq(crtc, false);
5456 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5458 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5461 if (amdgpu_in_reset(adev))
5464 #if defined(CONFIG_DRM_AMD_DC_DCN)
5465 spin_lock_irqsave(&dm->vblank_lock, flags);
5466 dm->vblank_workqueue->dm = dm;
5467 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5468 dm->vblank_workqueue->enable = enable;
5469 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5470 schedule_work(&dm->vblank_workqueue->mall_work);
5476 static int dm_enable_vblank(struct drm_crtc *crtc)
5478 return dm_set_vblank(crtc, true);
5481 static void dm_disable_vblank(struct drm_crtc *crtc)
5483 dm_set_vblank(crtc, false);
5486 /* Only the options currently available in the driver are implemented */
5487 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5488 .reset = dm_crtc_reset_state,
5489 .destroy = amdgpu_dm_crtc_destroy,
5490 .set_config = drm_atomic_helper_set_config,
5491 .page_flip = drm_atomic_helper_page_flip,
5492 .atomic_duplicate_state = dm_crtc_duplicate_state,
5493 .atomic_destroy_state = dm_crtc_destroy_state,
5494 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5495 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5496 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5497 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5498 .enable_vblank = dm_enable_vblank,
5499 .disable_vblank = dm_disable_vblank,
5500 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5503 static enum drm_connector_status
5504 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5507 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5511 * 1. This interface is NOT called in the context of the HPD irq.
5512 * 2. This interface *is called* in the context of a user-mode ioctl, which
5513 * makes it a bad place for *any* MST-related activity.
5516 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5517 !aconnector->fake_enable)
5518 connected = (aconnector->dc_sink != NULL);
5520 connected = (aconnector->base.force == DRM_FORCE_ON);
5522 update_subconnector_property(aconnector);
5524 return (connected ? connector_status_connected :
5525 connector_status_disconnected);
5528 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5529 struct drm_connector_state *connector_state,
5530 struct drm_property *property,
5533 struct drm_device *dev = connector->dev;
5534 struct amdgpu_device *adev = drm_to_adev(dev);
5535 struct dm_connector_state *dm_old_state =
5536 to_dm_connector_state(connector->state);
5537 struct dm_connector_state *dm_new_state =
5538 to_dm_connector_state(connector_state);
5542 if (property == dev->mode_config.scaling_mode_property) {
5543 enum amdgpu_rmx_type rmx_type;
5546 case DRM_MODE_SCALE_CENTER:
5547 rmx_type = RMX_CENTER;
5549 case DRM_MODE_SCALE_ASPECT:
5550 rmx_type = RMX_ASPECT;
5552 case DRM_MODE_SCALE_FULLSCREEN:
5553 rmx_type = RMX_FULL;
5555 case DRM_MODE_SCALE_NONE:
5561 if (dm_old_state->scaling == rmx_type)
5564 dm_new_state->scaling = rmx_type;
5566 } else if (property == adev->mode_info.underscan_hborder_property) {
5567 dm_new_state->underscan_hborder = val;
5569 } else if (property == adev->mode_info.underscan_vborder_property) {
5570 dm_new_state->underscan_vborder = val;
5572 } else if (property == adev->mode_info.underscan_property) {
5573 dm_new_state->underscan_enable = val;
5575 } else if (property == adev->mode_info.abm_level_property) {
5576 dm_new_state->abm_level = val;
5583 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5584 const struct drm_connector_state *state,
5585 struct drm_property *property,
5588 struct drm_device *dev = connector->dev;
5589 struct amdgpu_device *adev = drm_to_adev(dev);
5590 struct dm_connector_state *dm_state =
5591 to_dm_connector_state(state);
5594 if (property == dev->mode_config.scaling_mode_property) {
5595 switch (dm_state->scaling) {
5597 *val = DRM_MODE_SCALE_CENTER;
5600 *val = DRM_MODE_SCALE_ASPECT;
5603 *val = DRM_MODE_SCALE_FULLSCREEN;
5607 *val = DRM_MODE_SCALE_NONE;
5611 } else if (property == adev->mode_info.underscan_hborder_property) {
5612 *val = dm_state->underscan_hborder;
5614 } else if (property == adev->mode_info.underscan_vborder_property) {
5615 *val = dm_state->underscan_vborder;
5617 } else if (property == adev->mode_info.underscan_property) {
5618 *val = dm_state->underscan_enable;
5620 } else if (property == adev->mode_info.abm_level_property) {
5621 *val = dm_state->abm_level;
5628 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5630 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5632 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5635 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5637 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5638 const struct dc_link *link = aconnector->dc_link;
5639 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5640 struct amdgpu_display_manager *dm = &adev->dm;
5643 * Call only if mst_mgr was initialized before, since it's not done
5644 * for all connector types.
5646 if (aconnector->mst_mgr.dev)
5647 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5649 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5650 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5652 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5653 link->type != dc_connection_none &&
5654 dm->backlight_dev) {
5655 backlight_device_unregister(dm->backlight_dev);
5656 dm->backlight_dev = NULL;
5660 if (aconnector->dc_em_sink)
5661 dc_sink_release(aconnector->dc_em_sink);
5662 aconnector->dc_em_sink = NULL;
5663 if (aconnector->dc_sink)
5664 dc_sink_release(aconnector->dc_sink);
5665 aconnector->dc_sink = NULL;
5667 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5668 drm_connector_unregister(connector);
5669 drm_connector_cleanup(connector);
5670 if (aconnector->i2c) {
5671 i2c_del_adapter(&aconnector->i2c->base);
5672 kfree(aconnector->i2c);
5674 kfree(aconnector->dm_dp_aux.aux.name);
5679 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5681 struct dm_connector_state *state =
5682 to_dm_connector_state(connector->state);
5684 if (connector->state)
5685 __drm_atomic_helper_connector_destroy_state(connector->state);
5689 state = kzalloc(sizeof(*state), GFP_KERNEL);
5692 state->scaling = RMX_OFF;
5693 state->underscan_enable = false;
5694 state->underscan_hborder = 0;
5695 state->underscan_vborder = 0;
5696 state->base.max_requested_bpc = 8;
5697 state->vcpi_slots = 0;
5699 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5700 state->abm_level = amdgpu_dm_abm_level;
5702 __drm_atomic_helper_connector_reset(connector, &state->base);
5706 struct drm_connector_state *
5707 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5709 struct dm_connector_state *state =
5710 to_dm_connector_state(connector->state);
5712 struct dm_connector_state *new_state =
5713 kmemdup(state, sizeof(*state), GFP_KERNEL);
5718 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5720 new_state->freesync_capable = state->freesync_capable;
5721 new_state->abm_level = state->abm_level;
5722 new_state->scaling = state->scaling;
5723 new_state->underscan_enable = state->underscan_enable;
5724 new_state->underscan_hborder = state->underscan_hborder;
5725 new_state->underscan_vborder = state->underscan_vborder;
5726 new_state->vcpi_slots = state->vcpi_slots;
5727 new_state->pbn = state->pbn;
5728 return &new_state->base;
5732 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5734 struct amdgpu_dm_connector *amdgpu_dm_connector =
5735 to_amdgpu_dm_connector(connector);
5738 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5739 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5740 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5741 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5746 #if defined(CONFIG_DEBUG_FS)
5747 connector_debugfs_init(amdgpu_dm_connector);
5753 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5754 .reset = amdgpu_dm_connector_funcs_reset,
5755 .detect = amdgpu_dm_connector_detect,
5756 .fill_modes = drm_helper_probe_single_connector_modes,
5757 .destroy = amdgpu_dm_connector_destroy,
5758 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5759 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5760 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5761 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5762 .late_register = amdgpu_dm_connector_late_register,
5763 .early_unregister = amdgpu_dm_connector_unregister
5766 static int get_modes(struct drm_connector *connector)
5768 return amdgpu_dm_connector_get_modes(connector);
5771 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5773 struct dc_sink_init_data init_params = {
5774 .link = aconnector->dc_link,
5775 .sink_signal = SIGNAL_TYPE_VIRTUAL
5779 if (!aconnector->base.edid_blob_ptr) {
5780 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5781 aconnector->base.name);
5783 aconnector->base.force = DRM_FORCE_OFF;
5784 aconnector->base.override_edid = false;
5788 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5790 aconnector->edid = edid;
5792 aconnector->dc_em_sink = dc_link_add_remote_sink(
5793 aconnector->dc_link,
5795 (edid->extensions + 1) * EDID_LENGTH,
5798 if (aconnector->base.force == DRM_FORCE_ON) {
5799 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5800 aconnector->dc_link->local_sink :
5801 aconnector->dc_em_sink;
5802 dc_sink_retain(aconnector->dc_sink);
5806 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5808 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5811 * In case of a headless boot with force-on for a DP-managed connector,
5812 * these settings have to be != 0 to get an initial modeset
5814 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5815 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5816 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5820 aconnector->base.override_edid = true;
5821 create_eml_sink(aconnector);
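/*
 * Create a stream for the sink and validate it with DC, retrying with a lower
 * bpc (down to 6) if validation fails at the requested depth.
 */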
5824 static struct dc_stream_state *
5825 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5826 const struct drm_display_mode *drm_mode,
5827 const struct dm_connector_state *dm_state,
5828 const struct dc_stream_state *old_stream)
5830 struct drm_connector *connector = &aconnector->base;
5831 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5832 struct dc_stream_state *stream;
5833 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5834 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5835 enum dc_status dc_result = DC_OK;
5838 stream = create_stream_for_sink(aconnector, drm_mode,
5839 dm_state, old_stream,
5841 if (stream == NULL) {
5842 DRM_ERROR("Failed to create stream for sink!\n");
5846 dc_result = dc_validate_stream(adev->dm.dc, stream);
5848 if (dc_result != DC_OK) {
5849 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5854 dc_status_to_str(dc_result));
5856 dc_stream_release(stream);
5858 requested_bpc -= 2; /* lower bpc to retry validation */
5861 } while (stream == NULL && requested_bpc >= 6);
5866 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5867 struct drm_display_mode *mode)
5869 int result = MODE_ERROR;
5870 struct dc_sink *dc_sink;
5871 /* TODO: Unhardcode stream count */
5872 struct dc_stream_state *stream;
5873 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5875 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5876 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5880 * Only run this the first time mode_valid is called to initialize
5883 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5884 !aconnector->dc_em_sink)
5885 handle_edid_mgmt(aconnector);
5887 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5889 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5890 aconnector->base.force != DRM_FORCE_ON) {
5891 DRM_ERROR("dc_sink is NULL!\n");
5895 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5897 dc_stream_release(stream);
5902 /* TODO: error handling */
5906 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5907 struct dc_info_packet *out)
5909 struct hdmi_drm_infoframe frame;
5910 unsigned char buf[30]; /* 26 + 4 */
5914 memset(out, 0, sizeof(*out));
5916 if (!state->hdr_output_metadata)
5919 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5923 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5927 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5931 /* Prepare the infopacket for DC. */
5932 switch (state->connector->connector_type) {
5933 case DRM_MODE_CONNECTOR_HDMIA:
5934 out->hb0 = 0x87; /* type */
5935 out->hb1 = 0x01; /* version */
5936 out->hb2 = 0x1A; /* length */
5937 out->sb[0] = buf[3]; /* checksum */
5941 case DRM_MODE_CONNECTOR_DisplayPort:
5942 case DRM_MODE_CONNECTOR_eDP:
5943 out->hb0 = 0x00; /* sdp id, zero */
5944 out->hb1 = 0x87; /* type */
5945 out->hb2 = 0x1D; /* payload len - 1 */
5946 out->hb3 = (0x13 << 2); /* sdp version */
5947 out->sb[0] = 0x01; /* version */
5948 out->sb[1] = 0x1A; /* length */
5956 memcpy(&out->sb[i], &buf[4], 26);
5959 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5960 sizeof(out->sb), false);
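/*
 * Report whether the HDR output metadata blob changed between two connector
 * states; a change triggers the HDR handling in
 * amdgpu_dm_connector_atomic_check().
 */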
5966 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5967 const struct drm_connector_state *new_state)
5969 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5970 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5972 if (old_blob != new_blob) {
5973 if (old_blob && new_blob &&
5974 old_blob->length == new_blob->length)
5975 return memcmp(old_blob->data, new_blob->data,
5985 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5986 struct drm_atomic_state *state)
5988 struct drm_connector_state *new_con_state =
5989 drm_atomic_get_new_connector_state(state, conn);
5990 struct drm_connector_state *old_con_state =
5991 drm_atomic_get_old_connector_state(state, conn);
5992 struct drm_crtc *crtc = new_con_state->crtc;
5993 struct drm_crtc_state *new_crtc_state;
5996 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6001 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6002 struct dc_info_packet hdr_infopacket;
6004 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6008 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6009 if (IS_ERR(new_crtc_state))
6010 return PTR_ERR(new_crtc_state);
6013 * DC considers the stream backends changed if the
6014 * static metadata changes. Forcing the modeset also
6015 * gives a simple way for userspace to switch from
6016 * 8bpc to 10bpc when setting the metadata to enter
6019 * Changing the static metadata after it's been
6020 * set is permissible, however. So only force a
6021 * modeset if we're entering or exiting HDR.
6023 new_crtc_state->mode_changed =
6024 !old_con_state->hdr_output_metadata ||
6025 !new_con_state->hdr_output_metadata;
6031 static const struct drm_connector_helper_funcs
6032 amdgpu_dm_connector_helper_funcs = {
6034 * If a second, bigger display is hotplugged in fbcon mode, the higher-resolution
6035 * modes will be filtered out by drm_mode_validate_size(), and those modes
6036 * are missing after the user starts lightdm. So we need to renew the mode list
6037 * in the get_modes callback, not just return the mode count
6039 .get_modes = get_modes,
6040 .mode_valid = amdgpu_dm_connector_mode_valid,
6041 .atomic_check = amdgpu_dm_connector_atomic_check,
6044 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6048 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6050 struct drm_atomic_state *state = new_crtc_state->state;
6051 struct drm_plane *plane;
6054 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6055 struct drm_plane_state *new_plane_state;
6057 /* Cursor planes are "fake". */
6058 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6061 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6063 if (!new_plane_state) {
6065 * The plane is enabled on the CRTC and hasn't changed
6066 * state. This means that it previously passed
6067 * validation and is therefore enabled.
6073 /* We need a framebuffer to be considered enabled. */
6074 num_active += (new_plane_state->fb != NULL);
6080 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6081 struct drm_crtc_state *new_crtc_state)
6083 struct dm_crtc_state *dm_new_crtc_state =
6084 to_dm_crtc_state(new_crtc_state);
6086 dm_new_crtc_state->active_planes = 0;
6088 if (!dm_new_crtc_state->stream)
6091 dm_new_crtc_state->active_planes =
6092 count_crtc_active_planes(new_crtc_state);
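/*
 * CRTC atomic check: recount active planes, require a stream for modesets,
 * require the primary plane whenever the CRTC is enabled, and run DC stream
 * validation.
 */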
6095 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6096 struct drm_atomic_state *state)
6098 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6100 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6101 struct dc *dc = adev->dm.dc;
6102 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6105 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6107 dm_update_crtc_active_planes(crtc, crtc_state);
6109 if (unlikely(!dm_crtc_state->stream &&
6110 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6116 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6117 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6118 * planes are disabled, which is not supported by the hardware. And there is legacy
6119 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6121 if (crtc_state->enable &&
6122 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6123 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6127 /* In some use cases, like reset, no stream is attached */
6128 if (!dm_crtc_state->stream)
6131 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6134 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6138 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6139 const struct drm_display_mode *mode,
6140 struct drm_display_mode *adjusted_mode)
6145 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6146 .disable = dm_crtc_helper_disable,
6147 .atomic_check = dm_crtc_helper_atomic_check,
6148 .mode_fixup = dm_crtc_helper_mode_fixup,
6149 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6152 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6157 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6159 switch (display_color_depth) {
6160 case COLOR_DEPTH_666:
6162 case COLOR_DEPTH_888:
6164 case COLOR_DEPTH_101010:
6166 case COLOR_DEPTH_121212:
6168 case COLOR_DEPTH_141414:
6170 case COLOR_DEPTH_161616:
6178 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6179 struct drm_crtc_state *crtc_state,
6180 struct drm_connector_state *conn_state)
6182 struct drm_atomic_state *state = crtc_state->state;
6183 struct drm_connector *connector = conn_state->connector;
6184 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6185 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6186 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6187 struct drm_dp_mst_topology_mgr *mst_mgr;
6188 struct drm_dp_mst_port *mst_port;
6189 enum dc_color_depth color_depth;
6191 bool is_y420 = false;
6193 if (!aconnector->port || !aconnector->dc_sink)
6196 mst_port = aconnector->port;
6197 mst_mgr = &aconnector->mst_port->mst_mgr;
6199 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6202 if (!state->duplicated) {
6203 int max_bpc = conn_state->max_requested_bpc;
6204 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6205 aconnector->force_yuv420_output;
6206 color_depth = convert_color_depth_from_display_info(connector,
6209 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6210 clock = adjusted_mode->clock;
6211 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6213 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6216 dm_new_connector_state->pbn,
6217 dm_mst_get_pbn_divider(aconnector->dc_link));
6218 if (dm_new_connector_state->vcpi_slots < 0) {
6219 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6220 return dm_new_connector_state->vcpi_slots;
6225 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6226 .disable = dm_encoder_helper_disable,
6227 .atomic_check = dm_encoder_helper_atomic_check
6230 #if defined(CONFIG_DRM_AMD_DC_DCN)
6231 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6232 struct dc_state *dc_state)
6234 struct dc_stream_state *stream = NULL;
6235 struct drm_connector *connector;
6236 struct drm_connector_state *new_con_state, *old_con_state;
6237 struct amdgpu_dm_connector *aconnector;
6238 struct dm_connector_state *dm_conn_state;
6239 int i, j, clock, bpp;
6240 int vcpi, pbn_div, pbn = 0;
6242 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6244 aconnector = to_amdgpu_dm_connector(connector);
6246 if (!aconnector->port)
6249 if (!new_con_state || !new_con_state->crtc)
6252 dm_conn_state = to_dm_connector_state(new_con_state);
6254 for (j = 0; j < dc_state->stream_count; j++) {
6255 stream = dc_state->streams[j];
6259 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6268 if (stream->timing.flags.DSC != 1) {
6269 drm_dp_mst_atomic_enable_dsc(state,
6277 pbn_div = dm_mst_get_pbn_divider(stream->link);
6278 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6279 clock = stream->timing.pix_clk_100hz / 10;
6280 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6281 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6288 dm_conn_state->pbn = pbn;
6289 dm_conn_state->vcpi_slots = vcpi;
6295 static void dm_drm_plane_reset(struct drm_plane *plane)
6297 struct dm_plane_state *amdgpu_state = NULL;
6300 plane->funcs->atomic_destroy_state(plane, plane->state);
6302 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6303 WARN_ON(amdgpu_state == NULL);
6306 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6309 static struct drm_plane_state *
6310 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6312 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6314 old_dm_plane_state = to_dm_plane_state(plane->state);
6315 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6316 if (!dm_plane_state)
6319 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6321 if (old_dm_plane_state->dc_state) {
6322 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6323 dc_plane_state_retain(dm_plane_state->dc_state);
6326 return &dm_plane_state->base;
6329 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6330 struct drm_plane_state *state)
6332 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6334 if (dm_plane_state->dc_state)
6335 dc_plane_state_release(dm_plane_state->dc_state);
6337 drm_atomic_helper_plane_destroy_state(plane, state);
6340 static const struct drm_plane_funcs dm_plane_funcs = {
6341 .update_plane = drm_atomic_helper_update_plane,
6342 .disable_plane = drm_atomic_helper_disable_plane,
6343 .destroy = drm_primary_helper_destroy,
6344 .reset = dm_drm_plane_reset,
6345 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6346 .atomic_destroy_state = dm_drm_plane_destroy_state,
6347 .format_mod_supported = dm_plane_format_mod_supported,
6350 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6351 struct drm_plane_state *new_state)
6353 struct amdgpu_framebuffer *afb;
6354 struct drm_gem_object *obj;
6355 struct amdgpu_device *adev;
6356 struct amdgpu_bo *rbo;
6357 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6358 struct list_head list;
6359 struct ttm_validate_buffer tv;
6360 struct ww_acquire_ctx ticket;
6364 if (!new_state->fb) {
6365 DRM_DEBUG_DRIVER("No FB bound\n");
6369 afb = to_amdgpu_framebuffer(new_state->fb);
6370 obj = new_state->fb->obj[0];
6371 rbo = gem_to_amdgpu_bo(obj);
6372 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6373 INIT_LIST_HEAD(&list);
6377 list_add(&tv.head, &list);
6379 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6381 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6385 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6386 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6388 domain = AMDGPU_GEM_DOMAIN_VRAM;
6390 r = amdgpu_bo_pin(rbo, domain);
6391 if (unlikely(r != 0)) {
6392 if (r != -ERESTARTSYS)
6393 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6394 ttm_eu_backoff_reservation(&ticket, &list);
6398 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6399 if (unlikely(r != 0)) {
6400 amdgpu_bo_unpin(rbo);
6401 ttm_eu_backoff_reservation(&ticket, &list);
6402 DRM_ERROR("%p bind failed\n", rbo);
6406 ttm_eu_backoff_reservation(&ticket, &list);
6408 afb->address = amdgpu_bo_gpu_offset(rbo);
6413 * We don't do surface updates on planes that have been newly created,
6414 * but we also don't have the afb->address during atomic check.
6416 * Fill in buffer attributes depending on the address here, but only on
6417 * newly created planes since they're not being used by DC yet and this
6418 * won't modify global state.
6420 dm_plane_state_old = to_dm_plane_state(plane->state);
6421 dm_plane_state_new = to_dm_plane_state(new_state);
6423 if (dm_plane_state_new->dc_state &&
6424 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6425 struct dc_plane_state *plane_state =
6426 dm_plane_state_new->dc_state;
6427 bool force_disable_dcc = !plane_state->dcc.enable;
6429 fill_plane_buffer_attributes(
6430 adev, afb, plane_state->format, plane_state->rotation,
6432 &plane_state->tiling_info, &plane_state->plane_size,
6433 &plane_state->dcc, &plane_state->address,
6434 afb->tmz_surface, force_disable_dcc);
6440 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6441 struct drm_plane_state *old_state)
6443 struct amdgpu_bo *rbo;
6449 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6450 r = amdgpu_bo_reserve(rbo, false);
6452 DRM_ERROR("failed to reserve rbo before unpin\n");
6456 amdgpu_bo_unpin(rbo);
6457 amdgpu_bo_unreserve(rbo);
6458 amdgpu_bo_unref(&rbo);
6461 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6462 struct drm_crtc_state *new_crtc_state)
6464 struct drm_framebuffer *fb = state->fb;
6465 int min_downscale, max_upscale;
6467 int max_scale = INT_MAX;
6469 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6470 if (fb && state->crtc) {
6471 /* Validate viewport to cover the case when only the position changes */
6472 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6473 int viewport_width = state->crtc_w;
6474 int viewport_height = state->crtc_h;
6476 if (state->crtc_x < 0)
6477 viewport_width += state->crtc_x;
6478 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6479 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6481 if (state->crtc_y < 0)
6482 viewport_height += state->crtc_y;
6483 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6484 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6486 /* If the plane is completely outside of the screen, viewport_width and/or viewport_height
6487 * will be negative, which still satisfies the condition below and therefore also covers
6488 * those cases.
6489 * The x2 for the width is because of pipe-split.
6491 if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6495 /* Get min/max allowed scaling factors from plane caps. */
6496 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6497 &min_downscale, &max_upscale);
6499 * Convert to drm convention: 16.16 fixed point, instead of dc's
6500 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6501 * dst/src, so min_scale = 1.0 / max_upscale, etc.
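 * For example, if the plane caps report max_upscale = 16000 (16x) and
 * min_downscale = 250 (1/4x), then min_scale = (1000 << 16) / 16000 = 4096
 * (1/16 in 16.16 fixed point) and max_scale = (1000 << 16) / 250 = 262144
 * (4.0 in 16.16 fixed point).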
6503 min_scale = (1000 << 16) / max_upscale;
6504 max_scale = (1000 << 16) / min_downscale;
6507 return drm_atomic_helper_check_plane_state(
6508 state, new_crtc_state, min_scale, max_scale, true, true);
6511 static int dm_plane_atomic_check(struct drm_plane *plane,
6512 struct drm_plane_state *state)
6514 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6515 struct dc *dc = adev->dm.dc;
6516 struct dm_plane_state *dm_plane_state;
6517 struct dc_scaling_info scaling_info;
6518 struct drm_crtc_state *new_crtc_state;
6521 trace_amdgpu_dm_plane_atomic_check(state);
6523 dm_plane_state = to_dm_plane_state(state);
6525 if (!dm_plane_state->dc_state)
6529 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6530 if (!new_crtc_state)
6533 ret = dm_plane_helper_check_state(state, new_crtc_state);
6537 ret = fill_dc_scaling_info(state, &scaling_info);
6541 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6547 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6548 struct drm_plane_state *new_plane_state)
6550 /* Only support async updates on cursor planes. */
6551 if (plane->type != DRM_PLANE_TYPE_CURSOR)
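/*
 * Async updates bypass the full atomic commit: copy the new cursor
 * coordinates and fb into the current plane state and program the cursor
 * right away.
 */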
6557 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6558 struct drm_plane_state *new_state)
6560 struct drm_plane_state *old_state =
6561 drm_atomic_get_old_plane_state(new_state->state, plane);
6563 trace_amdgpu_dm_atomic_update_cursor(new_state);
6565 swap(plane->state->fb, new_state->fb);
6567 plane->state->src_x = new_state->src_x;
6568 plane->state->src_y = new_state->src_y;
6569 plane->state->src_w = new_state->src_w;
6570 plane->state->src_h = new_state->src_h;
6571 plane->state->crtc_x = new_state->crtc_x;
6572 plane->state->crtc_y = new_state->crtc_y;
6573 plane->state->crtc_w = new_state->crtc_w;
6574 plane->state->crtc_h = new_state->crtc_h;
6576 handle_cursor_update(plane, old_state);
6579 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6580 .prepare_fb = dm_plane_helper_prepare_fb,
6581 .cleanup_fb = dm_plane_helper_cleanup_fb,
6582 .atomic_check = dm_plane_atomic_check,
6583 .atomic_async_check = dm_plane_atomic_async_check,
6584 .atomic_async_update = dm_plane_atomic_async_update
6588 * TODO: these are currently initialized to RGB formats only.
6589 * For future use cases we should either initialize them dynamically based on
6590 * plane capabilities, or initialize this array to all formats, so the internal drm
6591 * check will succeed, and let DC implement the proper check
6593 static const uint32_t rgb_formats[] = {
6594 DRM_FORMAT_XRGB8888,
6595 DRM_FORMAT_ARGB8888,
6596 DRM_FORMAT_RGBA8888,
6597 DRM_FORMAT_XRGB2101010,
6598 DRM_FORMAT_XBGR2101010,
6599 DRM_FORMAT_ARGB2101010,
6600 DRM_FORMAT_ABGR2101010,
6601 DRM_FORMAT_XBGR8888,
6602 DRM_FORMAT_ABGR8888,
6606 static const uint32_t overlay_formats[] = {
6607 DRM_FORMAT_XRGB8888,
6608 DRM_FORMAT_ARGB8888,
6609 DRM_FORMAT_RGBA8888,
6610 DRM_FORMAT_XBGR8888,
6611 DRM_FORMAT_ABGR8888,
6615 static const u32 cursor_formats[] = {
6619 static int get_plane_formats(const struct drm_plane *plane,
6620 const struct dc_plane_cap *plane_cap,
6621 uint32_t *formats, int max_formats)
6623 int i, num_formats = 0;
6626 * TODO: Query support for each group of formats directly from
6627 * DC plane caps. This will require adding more formats to the
6631 switch (plane->type) {
6632 case DRM_PLANE_TYPE_PRIMARY:
6633 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6634 if (num_formats >= max_formats)
6637 formats[num_formats++] = rgb_formats[i];
6640 if (plane_cap && plane_cap->pixel_format_support.nv12)
6641 formats[num_formats++] = DRM_FORMAT_NV12;
6642 if (plane_cap && plane_cap->pixel_format_support.p010)
6643 formats[num_formats++] = DRM_FORMAT_P010;
6644 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6645 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6646 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6647 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6648 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6652 case DRM_PLANE_TYPE_OVERLAY:
6653 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6654 if (num_formats >= max_formats)
6657 formats[num_formats++] = overlay_formats[i];
6661 case DRM_PLANE_TYPE_CURSOR:
6662 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6663 if (num_formats >= max_formats)
6666 formats[num_formats++] = cursor_formats[i];
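/*
 * Register a DRM plane: gather the supported formats and modifiers,
 * initialize it as a universal plane, and attach blending, color
 * encoding/range and rotation properties based on the plane type,
 * plane caps and ASIC.
 */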
6674 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6675 struct drm_plane *plane,
6676 unsigned long possible_crtcs,
6677 const struct dc_plane_cap *plane_cap)
6679 uint32_t formats[32];
6682 unsigned int supported_rotations;
6683 uint64_t *modifiers = NULL;
6685 num_formats = get_plane_formats(plane, plane_cap, formats,
6686 ARRAY_SIZE(formats));
6688 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6692 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6693 &dm_plane_funcs, formats, num_formats,
6694 modifiers, plane->type, NULL);
6699 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6700 plane_cap && plane_cap->per_pixel_alpha) {
6701 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6702 BIT(DRM_MODE_BLEND_PREMULTI);
6704 drm_plane_create_alpha_property(plane);
6705 drm_plane_create_blend_mode_property(plane, blend_caps);
6708 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6710 (plane_cap->pixel_format_support.nv12 ||
6711 plane_cap->pixel_format_support.p010)) {
6712 /* This only affects YUV formats. */
6713 drm_plane_create_color_properties(
6715 BIT(DRM_COLOR_YCBCR_BT601) |
6716 BIT(DRM_COLOR_YCBCR_BT709) |
6717 BIT(DRM_COLOR_YCBCR_BT2020),
6718 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6719 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6720 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6723 supported_rotations =
6724 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6725 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6727 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6728 plane->type != DRM_PLANE_TYPE_CURSOR)
6729 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6730 supported_rotations);
6732 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6734 /* Create (reset) the plane state */
6735 if (plane->funcs->reset)
6736 plane->funcs->reset(plane);
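/*
 * Create a CRTC together with its dedicated cursor plane, take the maximum
 * cursor size from the DC caps and enable color management and the legacy
 * gamma LUT.
 */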
6741 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6742 struct drm_plane *plane,
6743 uint32_t crtc_index)
6745 struct amdgpu_crtc *acrtc = NULL;
6746 struct drm_plane *cursor_plane;
6750 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6754 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6755 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6757 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6761 res = drm_crtc_init_with_planes(
6766 &amdgpu_dm_crtc_funcs, NULL);
6771 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6773 /* Create (reset) the CRTC state */
6774 if (acrtc->base.funcs->reset)
6775 acrtc->base.funcs->reset(&acrtc->base);
6777 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6778 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6780 acrtc->crtc_id = crtc_index;
6781 acrtc->base.enabled = false;
6782 acrtc->otg_inst = -1;
6784 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6785 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6786 true, MAX_COLOR_LUT_ENTRIES);
6787 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6793 kfree(cursor_plane);
6798 static int to_drm_connector_type(enum signal_type st)
6801 case SIGNAL_TYPE_HDMI_TYPE_A:
6802 return DRM_MODE_CONNECTOR_HDMIA;
6803 case SIGNAL_TYPE_EDP:
6804 return DRM_MODE_CONNECTOR_eDP;
6805 case SIGNAL_TYPE_LVDS:
6806 return DRM_MODE_CONNECTOR_LVDS;
6807 case SIGNAL_TYPE_RGB:
6808 return DRM_MODE_CONNECTOR_VGA;
6809 case SIGNAL_TYPE_DISPLAY_PORT:
6810 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6811 return DRM_MODE_CONNECTOR_DisplayPort;
6812 case SIGNAL_TYPE_DVI_DUAL_LINK:
6813 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6814 return DRM_MODE_CONNECTOR_DVID;
6815 case SIGNAL_TYPE_VIRTUAL:
6816 return DRM_MODE_CONNECTOR_VIRTUAL;
6819 return DRM_MODE_CONNECTOR_Unknown;
6823 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6825 struct drm_encoder *encoder;
6827 /* There is only one encoder per connector */
6828 drm_connector_for_each_possible_encoder(connector, encoder)
6834 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6836 struct drm_encoder *encoder;
6837 struct amdgpu_encoder *amdgpu_encoder;
6839 encoder = amdgpu_dm_connector_to_encoder(connector);
6841 if (encoder == NULL)
6844 amdgpu_encoder = to_amdgpu_encoder(encoder);
6846 amdgpu_encoder->native_mode.clock = 0;
6848 if (!list_empty(&connector->probed_modes)) {
6849 struct drm_display_mode *preferred_mode = NULL;
6851 list_for_each_entry(preferred_mode,
6852 &connector->probed_modes,
6854 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6855 amdgpu_encoder->native_mode = *preferred_mode;
6863 static struct drm_display_mode *
6864 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6866 int hdisplay, int vdisplay)
6868 struct drm_device *dev = encoder->dev;
6869 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6870 struct drm_display_mode *mode = NULL;
6871 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6873 mode = drm_mode_duplicate(dev, native_mode);
6878 mode->hdisplay = hdisplay;
6879 mode->vdisplay = vdisplay;
6880 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6881 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
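/*
 * Add a set of common, smaller modes derived from the native mode; modes
 * larger than the native mode, equal to it, or already in the probed list
 * are skipped.
 */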
6887 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6888 struct drm_connector *connector)
6890 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6891 struct drm_display_mode *mode = NULL;
6892 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6893 struct amdgpu_dm_connector *amdgpu_dm_connector =
6894 to_amdgpu_dm_connector(connector);
6898 char name[DRM_DISPLAY_MODE_LEN];
6901 } common_modes[] = {
6902 { "640x480", 640, 480},
6903 { "800x600", 800, 600},
6904 { "1024x768", 1024, 768},
6905 { "1280x720", 1280, 720},
6906 { "1280x800", 1280, 800},
6907 {"1280x1024", 1280, 1024},
6908 { "1440x900", 1440, 900},
6909 {"1680x1050", 1680, 1050},
6910 {"1600x1200", 1600, 1200},
6911 {"1920x1080", 1920, 1080},
6912 {"1920x1200", 1920, 1200}
6915 n = ARRAY_SIZE(common_modes);
6917 for (i = 0; i < n; i++) {
6918 struct drm_display_mode *curmode = NULL;
6919 bool mode_existed = false;
6921 if (common_modes[i].w > native_mode->hdisplay ||
6922 common_modes[i].h > native_mode->vdisplay ||
6923 (common_modes[i].w == native_mode->hdisplay &&
6924 common_modes[i].h == native_mode->vdisplay))
6927 list_for_each_entry(curmode, &connector->probed_modes, head) {
6928 if (common_modes[i].w == curmode->hdisplay &&
6929 common_modes[i].h == curmode->vdisplay) {
6930 mode_existed = true;
6938 mode = amdgpu_dm_create_common_mode(encoder,
6939 common_modes[i].name, common_modes[i].w,
6941 drm_mode_probed_add(connector, mode);
6942 amdgpu_dm_connector->num_modes++;
6946 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6949 struct amdgpu_dm_connector *amdgpu_dm_connector =
6950 to_amdgpu_dm_connector(connector);
6953 /* empty probed_modes */
6954 INIT_LIST_HEAD(&connector->probed_modes);
6955 amdgpu_dm_connector->num_modes =
6956 drm_add_edid_modes(connector, edid);
6958 /* Sort the probed modes before calling
6959 * amdgpu_dm_get_native_mode(), since an EDID can have
6960 * more than one preferred mode. Modes that appear
6961 * later in the probed mode list could be of a higher,
6962 * preferred resolution: for example, a 3840x2160
6963 * preferred timing in the base EDID and a 4096x2160
6964 * preferred resolution in a later DID extension block.
6966 drm_mode_sort(&connector->probed_modes);
6967 amdgpu_dm_get_native_mode(connector);
6969 amdgpu_dm_connector->num_modes = 0;
6973 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6975 struct amdgpu_dm_connector *amdgpu_dm_connector =
6976 to_amdgpu_dm_connector(connector);
6977 struct drm_encoder *encoder;
6978 struct edid *edid = amdgpu_dm_connector->edid;
6980 encoder = amdgpu_dm_connector_to_encoder(connector);
6982 if (!drm_edid_is_valid(edid)) {
6983 amdgpu_dm_connector->num_modes =
6984 drm_add_modes_noedid(connector, 640, 480);
6986 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6987 amdgpu_dm_connector_add_common_modes(encoder, connector);
6989 amdgpu_dm_fbc_init(connector);
6991 return amdgpu_dm_connector->num_modes;
6994 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6995 struct amdgpu_dm_connector *aconnector,
6997 struct dc_link *link,
7000 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7003 * Some of the properties below require access to state, like bpc.
7004 * Allocate some default initial connector state with our reset helper.
7006 if (aconnector->base.funcs->reset)
7007 aconnector->base.funcs->reset(&aconnector->base);
7009 aconnector->connector_id = link_index;
7010 aconnector->dc_link = link;
7011 aconnector->base.interlace_allowed = false;
7012 aconnector->base.doublescan_allowed = false;
7013 aconnector->base.stereo_allowed = false;
7014 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7015 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7016 aconnector->audio_inst = -1;
7017 mutex_init(&aconnector->hpd_lock);
7020 * Configure HPD hot plug support: the connector->polled default value is 0,
7021 * which means HPD hot plug is not supported.
7023 switch (connector_type) {
7024 case DRM_MODE_CONNECTOR_HDMIA:
7025 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7026 aconnector->base.ycbcr_420_allowed =
7027 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7029 case DRM_MODE_CONNECTOR_DisplayPort:
7030 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7031 aconnector->base.ycbcr_420_allowed =
7032 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7034 case DRM_MODE_CONNECTOR_DVID:
7035 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7041 drm_object_attach_property(&aconnector->base.base,
7042 dm->ddev->mode_config.scaling_mode_property,
7043 DRM_MODE_SCALE_NONE);
7045 drm_object_attach_property(&aconnector->base.base,
7046 adev->mode_info.underscan_property,
7048 drm_object_attach_property(&aconnector->base.base,
7049 adev->mode_info.underscan_hborder_property,
7051 drm_object_attach_property(&aconnector->base.base,
7052 adev->mode_info.underscan_vborder_property,
7055 if (!aconnector->mst_port)
7056 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7058 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7059 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7060 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7062 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7063 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7064 drm_object_attach_property(&aconnector->base.base,
7065 adev->mode_info.abm_level_property, 0);
7068 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7069 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7070 connector_type == DRM_MODE_CONNECTOR_eDP) {
7071 drm_object_attach_property(
7072 &aconnector->base.base,
7073 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7075 if (!aconnector->mst_port)
7076 drm_connector_attach_vrr_capable_property(&aconnector->base);
7078 #ifdef CONFIG_DRM_AMD_DC_HDCP
7079 if (adev->dm.hdcp_workqueue)
7080 drm_connector_attach_content_protection_property(&aconnector->base, true);
7085 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7086 struct i2c_msg *msgs, int num)
7088 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7089 struct ddc_service *ddc_service = i2c->ddc_service;
7090 struct i2c_command cmd;
7094 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7099 cmd.number_of_payloads = num;
7100 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7103 for (i = 0; i < num; i++) {
7104 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7105 cmd.payloads[i].address = msgs[i].addr;
7106 cmd.payloads[i].length = msgs[i].len;
7107 cmd.payloads[i].data = msgs[i].buf;
7111 ddc_service->ctx->dc,
7112 ddc_service->ddc_pin->hw_info.ddc_channel,
7116 kfree(cmd.payloads);
7120 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7122 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7125 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7126 .master_xfer = amdgpu_dm_i2c_xfer,
7127 .functionality = amdgpu_dm_i2c_func,
7130 static struct amdgpu_i2c_adapter *
7131 create_i2c(struct ddc_service *ddc_service,
7135 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7136 struct amdgpu_i2c_adapter *i2c;
7138 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7141 i2c->base.owner = THIS_MODULE;
7142 i2c->base.class = I2C_CLASS_DDC;
7143 i2c->base.dev.parent = &adev->pdev->dev;
7144 i2c->base.algo = &amdgpu_dm_i2c_algo;
7145 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7146 i2c_set_adapdata(&i2c->base, i2c);
7147 i2c->ddc_service = ddc_service;
7148 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7155 * Note: this function assumes that dc_link_detect() was called for the
7156 * dc_link which will be represented by this aconnector.
7158 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7159 struct amdgpu_dm_connector *aconnector,
7160 uint32_t link_index,
7161 struct amdgpu_encoder *aencoder)
7165 struct dc *dc = dm->dc;
7166 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7167 struct amdgpu_i2c_adapter *i2c;
7169 link->priv = aconnector;
7171 DRM_DEBUG_DRIVER("%s()\n", __func__);
7173 i2c = create_i2c(link->ddc, link->link_index, &res);
7175 DRM_ERROR("Failed to create i2c adapter data\n");
7179 aconnector->i2c = i2c;
7180 res = i2c_add_adapter(&i2c->base);
7183 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7187 connector_type = to_drm_connector_type(link->connector_signal);
7189 res = drm_connector_init_with_ddc(
7192 &amdgpu_dm_connector_funcs,
7197 DRM_ERROR("connector_init failed\n");
7198 aconnector->connector_id = -1;
7202 drm_connector_helper_add(
7204 &amdgpu_dm_connector_helper_funcs);
7206 amdgpu_dm_connector_init_helper(
7213 drm_connector_attach_encoder(
7214 &aconnector->base, &aencoder->base);
7216 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7217 || connector_type == DRM_MODE_CONNECTOR_eDP)
7218 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7223 aconnector->i2c = NULL;
7228 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7230 switch (adev->mode_info.num_crtc) {
7247 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7248 struct amdgpu_encoder *aencoder,
7249 uint32_t link_index)
7251 struct amdgpu_device *adev = drm_to_adev(dev);
7253 int res = drm_encoder_init(dev,
7255 &amdgpu_dm_encoder_funcs,
7256 DRM_MODE_ENCODER_TMDS,
7259 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7262 aencoder->encoder_id = link_index;
7264 aencoder->encoder_id = -1;
7266 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7271 static void manage_dm_interrupts(struct amdgpu_device *adev,
7272 struct amdgpu_crtc *acrtc,
7276 * We have no guarantee that the frontend index maps to the same
7277 * backend index - some even map to more than one.
7279 * TODO: Use a different interrupt or check DC itself for the mapping.
7282 amdgpu_display_crtc_idx_to_irq_type(
7287 drm_crtc_vblank_on(&acrtc->base);
7290 &adev->pageflip_irq,
7296 &adev->pageflip_irq,
7298 drm_crtc_vblank_off(&acrtc->base);
7302 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7303 struct amdgpu_crtc *acrtc)
7306 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7309 * This reads the current state for the IRQ and forcibly reapplies
7310 * the setting to hardware.
7312 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7316 is_scaling_state_different(const struct dm_connector_state *dm_state,
7317 const struct dm_connector_state *old_dm_state)
7319 if (dm_state->scaling != old_dm_state->scaling)
7321 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7322 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7324 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7325 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7327 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7328 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7333 #ifdef CONFIG_DRM_AMD_DC_HDCP
7334 static bool is_content_protection_different(struct drm_connector_state *state,
7335 const struct drm_connector_state *old_state,
7336 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7338 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7339 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7341 /* Handle: Type0/1 change */
7342 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7343 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7344 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7348 /* CP is being re-enabled, ignore this
7350 * Handles: ENABLED -> DESIRED
7352 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7353 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7354 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7358 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7360 * Handles: UNDESIRED -> ENABLED
7362 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7363 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7364 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7366 /* Check if something is connected/enabled; otherwise we would start hdcp while nothing
7367 * is connected/enabled (hot-plug, headless s3, dpms).
7369 * Handles: DESIRED -> DESIRED (Special case)
7371 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7372 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7373 dm_con_state->update_hdcp = false;
7378 * Handles: UNDESIRED -> UNDESIRED
7379 * DESIRED -> DESIRED
7380 * ENABLED -> ENABLED
7382 if (old_state->content_protection == state->content_protection)
7386 * Handles: UNDESIRED -> DESIRED
7387 * DESIRED -> UNDESIRED
7388 * ENABLED -> UNDESIRED
7390 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7394 * Handles: DESIRED -> ENABLED
7400 static void remove_stream(struct amdgpu_device *adev,
7401 struct amdgpu_crtc *acrtc,
7402 struct dc_stream_state *stream)
7404 /* this is the update mode case */
7406 acrtc->otg_inst = -1;
7407 acrtc->enabled = false;
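/*
 * Translate the cursor plane state into a dc_cursor_position. When the
 * cursor hangs off the top or left edge of the CRTC (negative crtc_x/y),
 * the hotspot is shifted so DC can clip it correctly.
 */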
7410 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7411 struct dc_cursor_position *position)
7413 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7415 int xorigin = 0, yorigin = 0;
7417 position->enable = false;
7421 if (!crtc || !plane->state->fb)
7424 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7425 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7426 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7428 plane->state->crtc_w,
7429 plane->state->crtc_h);
7433 x = plane->state->crtc_x;
7434 y = plane->state->crtc_y;
7436 if (x <= -amdgpu_crtc->max_cursor_width ||
7437 y <= -amdgpu_crtc->max_cursor_height)
7441 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7445 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7448 position->enable = true;
7449 position->translate_by_source = true;
7452 position->x_hotspot = xorigin;
7453 position->y_hotspot = yorigin;
7458 static void handle_cursor_update(struct drm_plane *plane,
7459 struct drm_plane_state *old_plane_state)
7461 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7462 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7463 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7464 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7465 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7466 uint64_t address = afb ? afb->address : 0;
7467 struct dc_cursor_position position;
7468 struct dc_cursor_attributes attributes;
7471 if (!plane->state->fb && !old_plane_state->fb)
7474 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7476 amdgpu_crtc->crtc_id,
7477 plane->state->crtc_w,
7478 plane->state->crtc_h);
7480 ret = get_cursor_position(plane, crtc, &position);
7484 if (!position.enable) {
7485 /* turn off cursor */
7486 if (crtc_state && crtc_state->stream) {
7487 mutex_lock(&adev->dm.dc_lock);
7488 dc_stream_set_cursor_position(crtc_state->stream,
7490 mutex_unlock(&adev->dm.dc_lock);
7495 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7496 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7498 memset(&attributes, 0, sizeof(attributes));
7499 attributes.address.high_part = upper_32_bits(address);
7500 attributes.address.low_part = lower_32_bits(address);
7501 attributes.width = plane->state->crtc_w;
7502 attributes.height = plane->state->crtc_h;
7503 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7504 attributes.rotation_angle = 0;
7505 attributes.attribute_flags.value = 0;
7507 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7509 if (crtc_state->stream) {
7510 mutex_lock(&adev->dm.dc_lock);
7511 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7513 DRM_ERROR("DC failed to set cursor attributes\n");
7515 if (!dc_stream_set_cursor_position(crtc_state->stream,
7517 DRM_ERROR("DC failed to set cursor position\n");
7518 mutex_unlock(&adev->dm.dc_lock);
7522 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7525 assert_spin_locked(&acrtc->base.dev->event_lock);
7526 WARN_ON(acrtc->event);
7528 acrtc->event = acrtc->base.state->event;
7530 /* Set the flip status */
7531 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7533 /* Mark this event as consumed */
7534 acrtc->base.state->event = NULL;
7536 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7540 static void update_freesync_state_on_stream(
7541 struct amdgpu_display_manager *dm,
7542 struct dm_crtc_state *new_crtc_state,
7543 struct dc_stream_state *new_stream,
7544 struct dc_plane_state *surface,
7545 u32 flip_timestamp_in_us)
7547 struct mod_vrr_params vrr_params;
7548 struct dc_info_packet vrr_infopacket = {0};
7549 struct amdgpu_device *adev = dm->adev;
7550 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7551 unsigned long flags;
7557 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7558 * For now it's sufficient to just guard against these conditions.
7561 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7564 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7565 vrr_params = acrtc->dm_irq_params.vrr_params;
7568 mod_freesync_handle_preflip(
7569 dm->freesync_module,
7572 flip_timestamp_in_us,
7575 if (adev->family < AMDGPU_FAMILY_AI &&
7576 amdgpu_dm_vrr_active(new_crtc_state)) {
7577 mod_freesync_handle_v_update(dm->freesync_module,
7578 new_stream, &vrr_params);
7580 /* Need to call this before the frame ends. */
7581 dc_stream_adjust_vmin_vmax(dm->dc,
7582 new_crtc_state->stream,
7583 &vrr_params.adjust);
7587 mod_freesync_build_vrr_infopacket(
7588 dm->freesync_module,
7592 TRANSFER_FUNC_UNKNOWN,
7595 new_crtc_state->freesync_timing_changed |=
7596 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7598 sizeof(vrr_params.adjust)) != 0);
7600 new_crtc_state->freesync_vrr_info_changed |=
7601 (memcmp(&new_crtc_state->vrr_infopacket,
7603 sizeof(vrr_infopacket)) != 0);
7605 acrtc->dm_irq_params.vrr_params = vrr_params;
7606 new_crtc_state->vrr_infopacket = vrr_infopacket;
7608 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7609 new_stream->vrr_infopacket = vrr_infopacket;
7611 if (new_crtc_state->freesync_vrr_info_changed)
7612 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7613 new_crtc_state->base.crtc->base.id,
7614 (int)new_crtc_state->base.vrr_enabled,
7615 (int)vrr_params.state);
7617 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
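/*
 * Recompute the freesync/VRR configuration for the stream and cache it,
 * together with the active plane count, in the CRTC's dm_irq_params so the
 * IRQ handlers operate on consistent data.
 */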
7620 static void update_stream_irq_parameters(
7621 struct amdgpu_display_manager *dm,
7622 struct dm_crtc_state *new_crtc_state)
7624 struct dc_stream_state *new_stream = new_crtc_state->stream;
7625 struct mod_vrr_params vrr_params;
7626 struct mod_freesync_config config = new_crtc_state->freesync_config;
7627 struct amdgpu_device *adev = dm->adev;
7628 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7629 unsigned long flags;
7635 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7636 * For now it's sufficient to just guard against these conditions.
7638 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7641 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7642 vrr_params = acrtc->dm_irq_params.vrr_params;
7644 if (new_crtc_state->vrr_supported &&
7645 config.min_refresh_in_uhz &&
7646 config.max_refresh_in_uhz) {
7647 config.state = new_crtc_state->base.vrr_enabled ?
7648 VRR_STATE_ACTIVE_VARIABLE :
7651 config.state = VRR_STATE_UNSUPPORTED;
7654 mod_freesync_build_vrr_params(dm->freesync_module,
7656 &config, &vrr_params);
7658 new_crtc_state->freesync_timing_changed |=
7659 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7660 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7662 new_crtc_state->freesync_config = config;
7663 /* Copy state for access from DM IRQ handler */
7664 acrtc->dm_irq_params.freesync_config = config;
7665 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7666 acrtc->dm_irq_params.vrr_params = vrr_params;
7667 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7670 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7671 struct dm_crtc_state *new_state)
7673 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7674 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7676 if (!old_vrr_active && new_vrr_active) {
7677 /* Transition VRR inactive -> active:
7678 * While VRR is active, we must not disable vblank irq, as a
7679 * reenable after disable would compute bogus vblank/pflip
7680 * timestamps if it happened inside the display front-porch, which is likely.
7682 * We also need vupdate irq for the actual core vblank handling
7685 dm_set_vupdate_irq(new_state->base.crtc, true);
7686 drm_crtc_vblank_get(new_state->base.crtc);
7687 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7688 __func__, new_state->base.crtc->base.id);
7689 } else if (old_vrr_active && !new_vrr_active) {
7690 /* Transition VRR active -> inactive:
7691 * Allow vblank irq disable again for fixed refresh rate.
7693 dm_set_vupdate_irq(new_state->base.crtc, false);
7694 drm_crtc_vblank_put(new_state->base.crtc);
7695 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7696 __func__, new_state->base.crtc->base.id);
7700 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7702 struct drm_plane *plane;
7703 struct drm_plane_state *old_plane_state, *new_plane_state;
7707 * TODO: Make this per-stream so we don't issue redundant updates for
7708 * commits with multiple streams.
7710 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7712 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7713 handle_cursor_update(plane, old_plane_state);
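/*
 * Program all plane updates for one CRTC: build the dc_surface_update
 * bundle, wait for fences on incoming framebuffers, throttle page flips
 * against the target vblank and hand everything to DC in a single
 * dc_commit_updates_for_stream() call; PSR and the pageflip interrupt
 * state are updated afterwards.
 */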
7716 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7717 struct dc_state *dc_state,
7718 struct drm_device *dev,
7719 struct amdgpu_display_manager *dm,
7720 struct drm_crtc *pcrtc,
7721 bool wait_for_vblank)
7724 uint64_t timestamp_ns;
7725 struct drm_plane *plane;
7726 struct drm_plane_state *old_plane_state, *new_plane_state;
7727 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7728 struct drm_crtc_state *new_pcrtc_state =
7729 drm_atomic_get_new_crtc_state(state, pcrtc);
7730 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7731 struct dm_crtc_state *dm_old_crtc_state =
7732 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7733 int planes_count = 0, vpos, hpos;
7735 unsigned long flags;
7736 struct amdgpu_bo *abo;
7737 uint32_t target_vblank, last_flip_vblank;
7738 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7739 bool pflip_present = false;
7741 struct dc_surface_update surface_updates[MAX_SURFACES];
7742 struct dc_plane_info plane_infos[MAX_SURFACES];
7743 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7744 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7745 struct dc_stream_update stream_update;
7748 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7751 dm_error("Failed to allocate update bundle\n");
7756 * Disable the cursor first if we're disabling all the planes.
7757 * It'll remain on the screen after the planes are re-enabled
7760 if (acrtc_state->active_planes == 0)
7761 amdgpu_dm_commit_cursors(state);
7763 /* update planes when needed */
7764 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7765 struct drm_crtc *crtc = new_plane_state->crtc;
7766 struct drm_crtc_state *new_crtc_state;
7767 struct drm_framebuffer *fb = new_plane_state->fb;
7768 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7769 bool plane_needs_flip;
7770 struct dc_plane_state *dc_plane;
7771 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7773 /* Cursor plane is handled after stream updates */
7774 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7777 if (!fb || !crtc || pcrtc != crtc)
7780 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7781 if (!new_crtc_state->active)
7784 dc_plane = dm_new_plane_state->dc_state;
7786 bundle->surface_updates[planes_count].surface = dc_plane;
7787 if (new_pcrtc_state->color_mgmt_changed) {
7788 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7789 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7790 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7793 fill_dc_scaling_info(new_plane_state,
7794 &bundle->scaling_infos[planes_count]);
7796 bundle->surface_updates[planes_count].scaling_info =
7797 &bundle->scaling_infos[planes_count];
7799 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7801 pflip_present = pflip_present || plane_needs_flip;
7803 if (!plane_needs_flip) {
7808 abo = gem_to_amdgpu_bo(fb->obj[0]);
7811 * Wait for all fences on this FB. Do a limited wait to avoid
7812 * a deadlock during GPU reset when this fence will not signal
7813 * but we hold the reservation lock for the BO.
7815 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7817 msecs_to_jiffies(5000));
7818 if (unlikely(r <= 0))
7819 DRM_ERROR("Waiting for fences timed out!");
7821 fill_dc_plane_info_and_addr(
7822 dm->adev, new_plane_state,
7824 &bundle->plane_infos[planes_count],
7825 &bundle->flip_addrs[planes_count].address,
7826 afb->tmz_surface, false);
7828 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7829 new_plane_state->plane->index,
7830 bundle->plane_infos[planes_count].dcc.enable);
7832 bundle->surface_updates[planes_count].plane_info =
7833 &bundle->plane_infos[planes_count];
7836 * Only allow immediate flips for fast updates that don't
7837 * change FB pitch, DCC state, rotation or mirroring.
7839 bundle->flip_addrs[planes_count].flip_immediate =
7840 crtc->state->async_flip &&
7841 acrtc_state->update_type == UPDATE_TYPE_FAST;
7843 timestamp_ns = ktime_get_ns();
7844 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7845 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7846 bundle->surface_updates[planes_count].surface = dc_plane;
7848 if (!bundle->surface_updates[planes_count].surface) {
7849 DRM_ERROR("No surface for CRTC: id=%d\n",
7850 acrtc_attach->crtc_id);
7854 if (plane == pcrtc->primary)
7855 update_freesync_state_on_stream(
7858 acrtc_state->stream,
7860 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7862 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7864 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7865 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7871 if (pflip_present) {
7873 /* Use old throttling in non-vrr fixed refresh rate mode
7874 * to keep flip scheduling based on target vblank counts
7875 * working in a backwards compatible way, e.g., for
7876 * clients using the GLX_OML_sync_control extension or
7877 * DRI3/Present extension with defined target_msc.
7879 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7882 /* For variable refresh rate mode only:
7883 * Get vblank of last completed flip to avoid > 1 vrr
7884 * flips per video frame by use of throttling, but allow
7885 * flip programming anywhere in the possibly large
7886 * variable vrr vblank interval for fine-grained flip
7887 * timing control and more opportunity to avoid stutter
7888 * on late submission of flips.
7890 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7891 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7892 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7895 target_vblank = last_flip_vblank + wait_for_vblank;
7898 * Wait until we're out of the vertical blank period before the one
7899 * targeted by the flip
7901 while ((acrtc_attach->enabled &&
7902 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7903 0, &vpos, &hpos, NULL,
7904 NULL, &pcrtc->hwmode)
7905 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7906 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7907 (int)(target_vblank -
7908 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7909 usleep_range(1000, 1100);
7913 * Prepare the flip event for the pageflip interrupt to handle.
7915 * This only works in the case where we've already turned on the
7916 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7917 * from 0 -> n planes we have to skip a hardware generated event
7918 * and rely on sending it from software.
7920 if (acrtc_attach->base.state->event &&
7921 acrtc_state->active_planes > 0) {
7922 drm_crtc_vblank_get(pcrtc);
7924 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7926 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7927 prepare_flip_isr(acrtc_attach);
7929 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7932 if (acrtc_state->stream) {
7933 if (acrtc_state->freesync_vrr_info_changed)
7934 bundle->stream_update.vrr_infopacket =
7935 &acrtc_state->stream->vrr_infopacket;
7939 /* Update the planes if changed or disable if we don't have any. */
7940 if ((planes_count || acrtc_state->active_planes == 0) &&
7941 acrtc_state->stream) {
7942 bundle->stream_update.stream = acrtc_state->stream;
7943 if (new_pcrtc_state->mode_changed) {
7944 bundle->stream_update.src = acrtc_state->stream->src;
7945 bundle->stream_update.dst = acrtc_state->stream->dst;
7948 if (new_pcrtc_state->color_mgmt_changed) {
7950 * TODO: This isn't fully correct since we've actually
7951 * already modified the stream in place.
7953 bundle->stream_update.gamut_remap =
7954 &acrtc_state->stream->gamut_remap_matrix;
7955 bundle->stream_update.output_csc_transform =
7956 &acrtc_state->stream->csc_color_matrix;
7957 bundle->stream_update.out_transfer_func =
7958 acrtc_state->stream->out_transfer_func;
7961 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7962 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7963 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7966 * If FreeSync state on the stream has changed then we need to
7967 * re-adjust the min/max bounds now that DC doesn't handle this
7968 * as part of commit.
7970 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7971 amdgpu_dm_vrr_active(acrtc_state)) {
7972 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7973 dc_stream_adjust_vmin_vmax(
7974 dm->dc, acrtc_state->stream,
7975 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7976 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7978 mutex_lock(&dm->dc_lock);
7979 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7980 acrtc_state->stream->link->psr_settings.psr_allow_active)
7981 amdgpu_dm_psr_disable(acrtc_state->stream);
7983 dc_commit_updates_for_stream(dm->dc,
7984 bundle->surface_updates,
7986 acrtc_state->stream,
7987 &bundle->stream_update,
7991 * Enable or disable the interrupts on the backend.
7993 * Most pipes are put into power gating when unused.
7995 * When power gating is enabled on a pipe we lose the
7996 * interrupt enablement state when power gating is disabled.
7998 * So we need to update the IRQ control state in hardware
7999 * whenever the pipe turns on (since it could be previously
8000 * power gated) or off (since some pipes can't be power gated
8003 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8004 dm_update_pflip_irq_state(drm_to_adev(dev),
8007 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8008 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8009 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8010 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8011 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8012 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8013 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8014 amdgpu_dm_psr_enable(acrtc_state->stream);
8017 mutex_unlock(&dm->dc_lock);
8021 * Update cursor state *after* programming all the planes.
8022 * This avoids redundant programming in the case where we're going
8023 * to be disabling a single plane - those pipes are being disabled.
8025 if (acrtc_state->active_planes)
8026 amdgpu_dm_commit_cursors(state);
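/*
 * Notify the audio component about connectors that lost or gained a CRTC
 * during this commit so ELD/audio routing can be updated.
 */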
8032 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8033 struct drm_atomic_state *state)
8035 struct amdgpu_device *adev = drm_to_adev(dev);
8036 struct amdgpu_dm_connector *aconnector;
8037 struct drm_connector *connector;
8038 struct drm_connector_state *old_con_state, *new_con_state;
8039 struct drm_crtc_state *new_crtc_state;
8040 struct dm_crtc_state *new_dm_crtc_state;
8041 const struct dc_stream_status *status;
8044 /* Notify audio device removals. */
8045 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8046 if (old_con_state->crtc != new_con_state->crtc) {
8047 /* CRTC changes require notification. */
8051 if (!new_con_state->crtc)
8054 new_crtc_state = drm_atomic_get_new_crtc_state(
8055 state, new_con_state->crtc);
8057 if (!new_crtc_state)
8060 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8064 aconnector = to_amdgpu_dm_connector(connector);
8066 mutex_lock(&adev->dm.audio_lock);
8067 inst = aconnector->audio_inst;
8068 aconnector->audio_inst = -1;
8069 mutex_unlock(&adev->dm.audio_lock);
8071 amdgpu_dm_audio_eld_notify(adev, inst);
8074 /* Notify audio device additions. */
8075 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8076 if (!new_con_state->crtc)
8079 new_crtc_state = drm_atomic_get_new_crtc_state(
8080 state, new_con_state->crtc);
8082 if (!new_crtc_state)
8085 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8088 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8089 if (!new_dm_crtc_state->stream)
8092 status = dc_stream_get_status(new_dm_crtc_state->stream);
8096 aconnector = to_amdgpu_dm_connector(connector);
8098 mutex_lock(&adev->dm.audio_lock);
8099 inst = status->audio_inst;
8100 aconnector->audio_inst = inst;
8101 mutex_unlock(&adev->dm.audio_lock);
8103 amdgpu_dm_audio_eld_notify(adev, inst);
8108 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8109 * @crtc_state: the DRM CRTC state
8110 * @stream_state: the DC stream state.
8112 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8113 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8115 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8116 struct dc_stream_state *stream_state)
8118 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8122 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8123 * @state: The atomic state to commit
8125 * This will tell DC to commit the constructed DC state from atomic_check,
8126 * programming the hardware. Any failure here implies a hardware failure, since
8127 * atomic check should have filtered anything non-kosher.
8129 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8131 struct drm_device *dev = state->dev;
8132 struct amdgpu_device *adev = drm_to_adev(dev);
8133 struct amdgpu_display_manager *dm = &adev->dm;
8134 struct dm_atomic_state *dm_state;
8135 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8137 struct drm_crtc *crtc;
8138 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8139 unsigned long flags;
8140 bool wait_for_vblank = true;
8141 struct drm_connector *connector;
8142 struct drm_connector_state *old_con_state, *new_con_state;
8143 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8144 int crtc_disable_count = 0;
8145 bool mode_set_reset_required = false;
8147 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8149 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8151 dm_state = dm_atomic_get_new_state(state);
8152 if (dm_state && dm_state->context) {
8153 dc_state = dm_state->context;
8155 /* No state changes, retain current state. */
8156 dc_state_temp = dc_create_state(dm->dc);
8157 ASSERT(dc_state_temp);
8158 dc_state = dc_state_temp;
8159 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8162 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8163 new_crtc_state, i) {
8164 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8166 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8168 if (old_crtc_state->active &&
8169 (!new_crtc_state->active ||
8170 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8171 manage_dm_interrupts(adev, acrtc, false);
8172 dc_stream_release(dm_old_crtc_state->stream);
8176 drm_atomic_helper_calc_timestamping_constants(state);
8178 /* update changed items */
8179 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8180 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8182 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8183 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8186 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8187 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8188 "connectors_changed:%d\n",
8190 new_crtc_state->enable,
8191 new_crtc_state->active,
8192 new_crtc_state->planes_changed,
8193 new_crtc_state->mode_changed,
8194 new_crtc_state->active_changed,
8195 new_crtc_state->connectors_changed);
8197 /* Disable cursor if disabling crtc */
8198 if (old_crtc_state->active && !new_crtc_state->active) {
8199 struct dc_cursor_position position;
8201 memset(&position, 0, sizeof(position));
8202 mutex_lock(&dm->dc_lock);
8203 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8204 mutex_unlock(&dm->dc_lock);
8207 /* Copy all transient state flags into dc state */
8208 if (dm_new_crtc_state->stream) {
8209 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8210 dm_new_crtc_state->stream);
8213 /* handles headless hotplug case, updating new_state and
8214 * aconnector as needed
8217 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8219 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8221 if (!dm_new_crtc_state->stream) {
8223 * This could happen because of issues with the
8224 * delivery of userspace notifications.
8225 * In that case userspace tries to set a mode on a
8226 * display which is in fact disconnected;
8227 * dc_sink is NULL on the aconnector and
8228 * we expect a mode reset to come soon.
8230 * This can also happen when an unplug is done
8231 * during the resume sequence.
8233 * In this case, we want to pretend we still
8234 * have a sink to keep the pipe running so that
8235 * the hw state stays consistent with the sw state.
8237 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8238 __func__, acrtc->base.base.id);
8242 if (dm_old_crtc_state->stream)
8243 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8245 pm_runtime_get_noresume(dev->dev);
8247 acrtc->enabled = true;
8248 acrtc->hw_mode = new_crtc_state->mode;
8249 crtc->hwmode = new_crtc_state->mode;
8250 mode_set_reset_required = true;
8251 } else if (modereset_required(new_crtc_state)) {
8252 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8253 /* i.e. reset mode */
8254 if (dm_old_crtc_state->stream)
8255 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8256 mode_set_reset_required = true;
8258 } /* for_each_crtc_in_state() */
8261 /* If there was a mode set or reset, disable eDP PSR */
8262 if (mode_set_reset_required)
8263 amdgpu_dm_psr_disable_all(dm);
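/* Program the constructed DC state into hardware under the DC lock. */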
8265 dm_enable_per_frame_crtc_master_sync(dc_state);
8266 mutex_lock(&dm->dc_lock);
8267 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8268 mutex_unlock(&dm->dc_lock);
8271 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8272 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8274 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8276 if (dm_new_crtc_state->stream != NULL) {
8277 const struct dc_stream_status *status =
8278 dc_stream_get_status(dm_new_crtc_state->stream);
8281 status = dc_stream_get_status_from_state(dc_state,
8282 dm_new_crtc_state->stream);
8284 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8286 acrtc->otg_inst = status->primary_otg_inst;
8289 #ifdef CONFIG_DRM_AMD_DC_HDCP
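/*
 * Propagate content protection changes: reset HDCP for connectors whose
 * stream is going away while protection was enabled, and (re)enable HDCP
 * for connectors whose desired protection state has changed.
 */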
8290 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8291 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8292 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8293 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8295 new_crtc_state = NULL;
8298 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8300 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8302 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8303 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8304 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8305 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8306 dm_new_con_state->update_hdcp = true;
8310 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8311 hdcp_update_display(
8312 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8313 new_con_state->hdcp_content_type,
8314 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8319 /* Handle connector state changes */
8320 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8321 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8322 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8323 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8324 struct dc_surface_update dummy_updates[MAX_SURFACES];
8325 struct dc_stream_update stream_update;
8326 struct dc_info_packet hdr_packet;
8327 struct dc_stream_status *status = NULL;
8328 bool abm_changed, hdr_changed, scaling_changed;
8330 memset(&dummy_updates, 0, sizeof(dummy_updates));
8331 memset(&stream_update, 0, sizeof(stream_update));
8334 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8335 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8338 /* Skip any modesets/resets */
8339 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8342 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8343 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8345 scaling_changed = is_scaling_state_different(dm_new_con_state,
8348 abm_changed = dm_new_crtc_state->abm_level !=
8349 dm_old_crtc_state->abm_level;
8352 is_hdr_metadata_different(old_con_state, new_con_state);
8354 if (!scaling_changed && !abm_changed && !hdr_changed)
8357 stream_update.stream = dm_new_crtc_state->stream;
8358 if (scaling_changed) {
8359 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8360 dm_new_con_state, dm_new_crtc_state->stream);
8362 stream_update.src = dm_new_crtc_state->stream->src;
8363 stream_update.dst = dm_new_crtc_state->stream->dst;
8367 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8369 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8373 fill_hdr_info_packet(new_con_state, &hdr_packet);
8374 stream_update.hdr_static_metadata = &hdr_packet;
8377 status = dc_stream_get_status(dm_new_crtc_state->stream);
8379 WARN_ON(!status->plane_count);
8382 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8383 * Here we create an empty update on each plane.
8384 * To fix this, DC should permit updating only stream properties.
8386 for (j = 0; j < status->plane_count; j++)
8387 dummy_updates[j].surface = status->plane_states[0];
8390 mutex_lock(&dm->dc_lock);
8391 dc_commit_updates_for_stream(dm->dc,
8393 status->plane_count,
8394 dm_new_crtc_state->stream,
8397 mutex_unlock(&dm->dc_lock);
8400 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8401 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8402 new_crtc_state, i) {
8403 if (old_crtc_state->active && !new_crtc_state->active)
8404 crtc_disable_count++;
8406 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8407 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8409 /* For freesync config update on crtc state and params for irq */
8410 update_stream_irq_parameters(dm, dm_new_crtc_state);
8412 /* Handle vrr on->off / off->on transitions */
8413 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8418 * Enable interrupts for CRTCs that are newly enabled or went through
8419 * a modeset. This is intentionally deferred until after the front end
8420 * state has been modified so that the OTG is already on and the IRQ
8421 * handlers never access stale or invalid state.
8423 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8424 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8426 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8428 if (new_crtc_state->active &&
8429 (!old_crtc_state->active ||
8430 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8431 dc_stream_retain(dm_new_crtc_state->stream);
8432 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8433 manage_dm_interrupts(adev, acrtc, true);
8435 #ifdef CONFIG_DEBUG_FS
8437 * Frontend may have changed so reapply the CRC capture
8438 * settings for the stream.
8440 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8442 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8443 amdgpu_dm_crtc_configure_crc_source(
8444 crtc, dm_new_crtc_state,
8445 dm_new_crtc_state->crc_src);
8451 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8452 if (new_crtc_state->async_flip)
8453 wait_for_vblank = false;
8455 /* update planes when needed per crtc*/
8456 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8457 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8459 if (dm_new_crtc_state->stream)
8460 amdgpu_dm_commit_planes(state, dc_state, dev,
8461 dm, crtc, wait_for_vblank);
8464 /* Update audio instances for each connector. */
8465 amdgpu_dm_commit_audio(dev, state);
8468 * Send a vblank event for every CRTC event not already handled in the flip
8469 * path, and mark the event consumed for drm_atomic_helper_commit_hw_done().
8471 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8472 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8474 if (new_crtc_state->event)
8475 drm_send_event_locked(dev, &new_crtc_state->event->base);
8477 new_crtc_state->event = NULL;
8479 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8481 /* Signal HW programming completion */
8482 drm_atomic_helper_commit_hw_done(state);
8484 if (wait_for_vblank)
8485 drm_atomic_helper_wait_for_flip_done(dev, state);
8487 drm_atomic_helper_cleanup_planes(dev, state);
8489 /* return the stolen vga memory back to VRAM */
8490 if (!adev->mman.keep_stolen_vga_memory)
8491 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8492 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8495 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8496 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
8499 for (i = 0; i < crtc_disable_count; i++)
8500 pm_runtime_put_autosuspend(dev->dev);
8501 pm_runtime_mark_last_busy(dev->dev);
8504 dc_release_state(dc_state_temp);
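/*
 * Force a modeset on the CRTC currently driving @connector by building a
 * minimal atomic state (connector, CRTC and primary plane), marking the
 * mode as changed and committing it, which restores the previous display
 * setting.
 */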
8508 static int dm_force_atomic_commit(struct drm_connector *connector)
8511 struct drm_device *ddev = connector->dev;
8512 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8513 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8514 struct drm_plane *plane = disconnected_acrtc->base.primary;
8515 struct drm_connector_state *conn_state;
8516 struct drm_crtc_state *crtc_state;
8517 struct drm_plane_state *plane_state;
8522 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8524 /* Construct an atomic state to restore previous display setting */
8527 * Attach connectors to drm_atomic_state
8529 conn_state = drm_atomic_get_connector_state(state, connector);
8531 ret = PTR_ERR_OR_ZERO(conn_state);
8535 /* Attach crtc to drm_atomic_state*/
8536 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8538 ret = PTR_ERR_OR_ZERO(crtc_state);
8542 /* force a restore */
8543 crtc_state->mode_changed = true;
8545 /* Attach plane to drm_atomic_state */
8546 plane_state = drm_atomic_get_plane_state(state, plane);
8548 ret = PTR_ERR_OR_ZERO(plane_state);
8552 /* Call commit internally with the state we just constructed */
8553 ret = drm_atomic_commit(state);
8556 drm_atomic_state_put(state);
8558 DRM_ERROR("Restoring old state failed with %i\n", ret);
8564 * This function handles all cases when a set mode does not come upon hotplug.
8565 * This includes when a display is unplugged and then plugged back into the
8566 * same port, and when running without usermode desktop manager support.
8568 void dm_restore_drm_connector_state(struct drm_device *dev,
8569 struct drm_connector *connector)
8571 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8572 struct amdgpu_crtc *disconnected_acrtc;
8573 struct dm_crtc_state *acrtc_state;
8575 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8578 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8579 if (!disconnected_acrtc)
8582 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8583 if (!acrtc_state->stream)
8587 * If the previous sink has not been released and is different from the current
8588 * one, we deduce we are in a state where we cannot rely on a usermode call
8589 * to turn on the display, so we do it here.
8591 if (acrtc_state->stream->sink != aconnector->dc_sink)
8592 dm_force_atomic_commit(&aconnector->base);
8596 * Grabs all modesetting locks to serialize against any blocking commits,
8597 * and waits for completion of all non-blocking commits.
8599 static int do_aquire_global_lock(struct drm_device *dev,
8600 struct drm_atomic_state *state)
8602 struct drm_crtc *crtc;
8603 struct drm_crtc_commit *commit;
8607 * Adding all modeset locks to acquire_ctx will
8608 * ensure that when the framework releases it, the
8609 * extra locks we are locking here will get released too.
8611 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8615 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8616 spin_lock(&crtc->commit_lock);
8617 commit = list_first_entry_or_null(&crtc->commit_list,
8618 struct drm_crtc_commit, commit_entry);
8620 drm_crtc_commit_get(commit);
8621 spin_unlock(&crtc->commit_lock);
8627 * Make sure all pending HW programming has completed and page flips are done.
8630 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8633 ret = wait_for_completion_interruptible_timeout(
8634 &commit->flip_done, 10*HZ);
8637 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8638 "timed out\n", crtc->base.id, crtc->name);
8640 drm_crtc_commit_put(commit);
8643 return ret < 0 ? ret : 0;
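/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector's
 * reported min/max refresh range and the current mode's refresh rate, and
 * stash it in the new CRTC state for later programming.
 */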
8646 static void get_freesync_config_for_crtc(
8647 struct dm_crtc_state *new_crtc_state,
8648 struct dm_connector_state *new_con_state)
8650 struct mod_freesync_config config = {0};
8651 struct amdgpu_dm_connector *aconnector =
8652 to_amdgpu_dm_connector(new_con_state->base.connector);
8653 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8654 int vrefresh = drm_mode_vrefresh(mode);
8656 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8657 vrefresh >= aconnector->min_vfreq &&
8658 vrefresh <= aconnector->max_vfreq;
8660 if (new_crtc_state->vrr_supported) {
8661 new_crtc_state->stream->ignore_msa_timing_param = true;
8662 config.state = new_crtc_state->base.vrr_enabled ?
8663 VRR_STATE_ACTIVE_VARIABLE :
8665 config.min_refresh_in_uhz =
8666 aconnector->min_vfreq * 1000000;
8667 config.max_refresh_in_uhz =
8668 aconnector->max_vfreq * 1000000;
8669 config.vsif_supported = true;
8673 new_crtc_state->freesync_config = config;
8676 static void reset_freesync_config_for_crtc(
8677 struct dm_crtc_state *new_crtc_state)
8679 new_crtc_state->vrr_supported = false;
8681 memset(&new_crtc_state->vrr_infopacket, 0,
8682 sizeof(new_crtc_state->vrr_infopacket));
8685 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8686 struct drm_atomic_state *state,
8687 struct drm_crtc *crtc,
8688 struct drm_crtc_state *old_crtc_state,
8689 struct drm_crtc_state *new_crtc_state,
8691 bool *lock_and_validation_needed)
8693 struct dm_atomic_state *dm_state = NULL;
8694 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8695 struct dc_stream_state *new_stream;
8699 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8700 * update changed items
8702 struct amdgpu_crtc *acrtc = NULL;
8703 struct amdgpu_dm_connector *aconnector = NULL;
8704 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8705 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8709 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8710 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8711 acrtc = to_amdgpu_crtc(crtc);
8712 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8714 /* TODO This hack should go away */
8715 if (aconnector && enable) {
8716 /* Make sure fake sink is created in plug-in scenario */
8717 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8719 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8722 if (IS_ERR(drm_new_conn_state)) {
8723 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8727 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8728 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8730 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8733 new_stream = create_validate_stream_for_sink(aconnector,
8734 &new_crtc_state->mode,
8736 dm_old_crtc_state->stream);
8739 * we can have no stream on ACTION_SET if a display
8740 * was disconnected during S3, in this case it is not an
8741 * error, the OS will be updated after detection, and
8742 * will do the right thing on next atomic commit
8746 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8747 __func__, acrtc->base.base.id);
8753 * TODO: Check VSDB bits to decide whether this should
8754 * be enabled or not.
8756 new_stream->triggered_crtc_reset.enabled =
8757 dm->force_timing_sync;
8759 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8761 ret = fill_hdr_info_packet(drm_new_conn_state,
8762 &new_stream->hdr_static_metadata);
8767 * If we already removed the old stream from the context
8768 * (and set the new stream to NULL) then we can't reuse
8769 * the old stream even if the stream and scaling are unchanged.
8770 * We'll hit the BUG_ON and black screen.
8772 * TODO: Refactor this function to allow this check to work
8773 * in all conditions.
8775 if (dm_new_crtc_state->stream &&
8776 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8777 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8778 new_crtc_state->mode_changed = false;
8779 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8780 new_crtc_state->mode_changed);
8784 /* mode_changed flag may get updated above, need to check again */
8785 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8789 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8790 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8791 "connectors_changed:%d\n",
8793 new_crtc_state->enable,
8794 new_crtc_state->active,
8795 new_crtc_state->planes_changed,
8796 new_crtc_state->mode_changed,
8797 new_crtc_state->active_changed,
8798 new_crtc_state->connectors_changed);
8800 /* Remove stream for any changed/disabled CRTC */
8803 if (!dm_old_crtc_state->stream)
8806 ret = dm_atomic_get_state(state, &dm_state);
8810 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8813 /* i.e. reset mode */
8814 if (dc_remove_stream_from_ctx(
8817 dm_old_crtc_state->stream) != DC_OK) {
8822 dc_stream_release(dm_old_crtc_state->stream);
8823 dm_new_crtc_state->stream = NULL;
8825 reset_freesync_config_for_crtc(dm_new_crtc_state);
8827 *lock_and_validation_needed = true;
8829 } else {/* Add stream for any updated/enabled CRTC */
8831 * Quick fix to prevent a NULL pointer dereference on new_stream when MST
8832 * connectors added in chained mode are not found in the existing crtc_state.
8833 * TODO: need to dig out the root cause of this.
8835 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8838 if (modereset_required(new_crtc_state))
8841 if (modeset_required(new_crtc_state, new_stream,
8842 dm_old_crtc_state->stream)) {
8844 WARN_ON(dm_new_crtc_state->stream);
8846 ret = dm_atomic_get_state(state, &dm_state);
8850 dm_new_crtc_state->stream = new_stream;
8852 dc_stream_retain(new_stream);
8854 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8857 if (dc_add_stream_to_ctx(
8860 dm_new_crtc_state->stream) != DC_OK) {
8865 *lock_and_validation_needed = true;
8870 /* Release extra reference */
8872 dc_stream_release(new_stream);
8875 * We want to do dc stream updates that do not require a
8876 * full modeset below.
8878 if (!(enable && aconnector && new_crtc_state->active))
8881 * Given above conditions, the dc state cannot be NULL because:
8882 * 1. We're in the process of enabling CRTCs (just been added
8883 * to the dc context, or already is on the context)
8884 * 2. Has a valid connector attached, and
8885 * 3. Is currently active and enabled.
8886 * => The dc stream state currently exists.
8888 BUG_ON(dm_new_crtc_state->stream == NULL);
8890 /* Scaling or underscan settings */
8891 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8892 update_stream_scaling_settings(
8893 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8896 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8899 * Color management settings. We also update color properties
8900 * when a modeset is needed, to ensure it gets reprogrammed.
8902 if (dm_new_crtc_state->base.color_mgmt_changed ||
8903 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8904 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8909 /* Update Freesync settings. */
8910 get_freesync_config_for_crtc(dm_new_crtc_state,
8917 dc_stream_release(new_stream);
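/*
 * Decide whether a plane must be removed and re-added to the DC context.
 * Any change that can affect pipe allocation, z-ordering or bandwidth
 * (modesets, color management changes, or scaling, rotation, blending,
 * format, tiling or DCC changes on any plane sharing the CRTC) forces a
 * reset.
 */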
8921 static bool should_reset_plane(struct drm_atomic_state *state,
8922 struct drm_plane *plane,
8923 struct drm_plane_state *old_plane_state,
8924 struct drm_plane_state *new_plane_state)
8926 struct drm_plane *other;
8927 struct drm_plane_state *old_other_state, *new_other_state;
8928 struct drm_crtc_state *new_crtc_state;
8932 * TODO: Remove this hack once the checks below are sufficient
8933 * to determine when we need to reset all the planes on the CRTC.
8936 if (state->allow_modeset)
8939 /* Exit early if we know that we're adding or removing the plane. */
8940 if (old_plane_state->crtc != new_plane_state->crtc)
8943 /* old crtc == new_crtc == NULL, plane not in context. */
8944 if (!new_plane_state->crtc)
8948 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8950 if (!new_crtc_state)
8953 /* CRTC Degamma changes currently require us to recreate planes. */
8954 if (new_crtc_state->color_mgmt_changed)
8957 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8961 * If there are any new primary or overlay planes being added or
8962 * removed then the z-order can potentially change. To ensure
8963 * correct z-order and pipe acquisition the current DC architecture
8964 * requires us to remove and recreate all existing planes.
8966 * TODO: Come up with a more elegant solution for this.
8968 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8969 struct amdgpu_framebuffer *old_afb, *new_afb;
8970 if (other->type == DRM_PLANE_TYPE_CURSOR)
8973 if (old_other_state->crtc != new_plane_state->crtc &&
8974 new_other_state->crtc != new_plane_state->crtc)
8977 if (old_other_state->crtc != new_other_state->crtc)
8980 /* Src/dst size and scaling updates. */
8981 if (old_other_state->src_w != new_other_state->src_w ||
8982 old_other_state->src_h != new_other_state->src_h ||
8983 old_other_state->crtc_w != new_other_state->crtc_w ||
8984 old_other_state->crtc_h != new_other_state->crtc_h)
8987 /* Rotation / mirroring updates. */
8988 if (old_other_state->rotation != new_other_state->rotation)
8991 /* Blending updates. */
8992 if (old_other_state->pixel_blend_mode !=
8993 new_other_state->pixel_blend_mode)
8996 /* Alpha updates. */
8997 if (old_other_state->alpha != new_other_state->alpha)
9000 /* Colorspace changes. */
9001 if (old_other_state->color_range != new_other_state->color_range ||
9002 old_other_state->color_encoding != new_other_state->color_encoding)
9005 /* Framebuffer checks fall at the end. */
9006 if (!old_other_state->fb || !new_other_state->fb)
9009 /* Pixel format changes can require bandwidth updates. */
9010 if (old_other_state->fb->format != new_other_state->fb->format)
9013 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9014 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9016 /* Tiling and DCC changes also require bandwidth updates. */
9017 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9018 old_afb->base.modifier != new_afb->base.modifier)
9025 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9026 struct drm_plane_state *new_plane_state,
9027 struct drm_framebuffer *fb)
9029 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9030 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9034 if (fb->width > new_acrtc->max_cursor_width ||
9035 fb->height > new_acrtc->max_cursor_height) {
9036 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9037 new_plane_state->fb->width,
9038 new_plane_state->fb->height);
9041 if (new_plane_state->src_w != fb->width << 16 ||
9042 new_plane_state->src_h != fb->height << 16) {
9043 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9047 /* Pitch in pixels */
9048 pitch = fb->pitches[0] / fb->format->cpp[0];
9050 if (fb->width != pitch) {
9051 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9060 /* FB pitch is supported by cursor plane */
9063 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9067 /* Core DRM takes care of checking FB modifiers, so we only need to
9068 * check tiling flags when the FB doesn't have a modifier. */
9069 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9070 if (adev->family < AMDGPU_FAMILY_AI) {
9071 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9072 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9073 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9075 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9078 DRM_DEBUG_ATOMIC("Cursor FB not linear");
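/*
 * Add, update or remove the DC plane state backing a DRM plane so that
 * dm_state->context stays in sync with the atomic state being checked.
 */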
9086 static int dm_update_plane_state(struct dc *dc,
9087 struct drm_atomic_state *state,
9088 struct drm_plane *plane,
9089 struct drm_plane_state *old_plane_state,
9090 struct drm_plane_state *new_plane_state,
9092 bool *lock_and_validation_needed)
9095 struct dm_atomic_state *dm_state = NULL;
9096 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9097 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9098 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9099 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9100 struct amdgpu_crtc *new_acrtc;
9105 new_plane_crtc = new_plane_state->crtc;
9106 old_plane_crtc = old_plane_state->crtc;
9107 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9108 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9110 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9111 if (!enable || !new_plane_crtc ||
9112 drm_atomic_plane_disabling(plane->state, new_plane_state))
9115 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9117 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9118 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9122 if (new_plane_state->fb) {
9123 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9124 new_plane_state->fb);
9132 needs_reset = should_reset_plane(state, plane, old_plane_state,
9135 /* Remove any changed/removed planes */
9140 if (!old_plane_crtc)
9143 old_crtc_state = drm_atomic_get_old_crtc_state(
9144 state, old_plane_crtc);
9145 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9147 if (!dm_old_crtc_state->stream)
9150 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9151 plane->base.id, old_plane_crtc->base.id);
9153 ret = dm_atomic_get_state(state, &dm_state);
9157 if (!dc_remove_plane_from_context(
9159 dm_old_crtc_state->stream,
9160 dm_old_plane_state->dc_state,
9161 dm_state->context)) {
9167 dc_plane_state_release(dm_old_plane_state->dc_state);
9168 dm_new_plane_state->dc_state = NULL;
9170 *lock_and_validation_needed = true;
9172 } else { /* Add new planes */
9173 struct dc_plane_state *dc_new_plane_state;
9175 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9178 if (!new_plane_crtc)
9181 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9182 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9184 if (!dm_new_crtc_state->stream)
9190 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9194 WARN_ON(dm_new_plane_state->dc_state);
9196 dc_new_plane_state = dc_create_plane_state(dc);
9197 if (!dc_new_plane_state)
9200 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9201 plane->base.id, new_plane_crtc->base.id);
9203 ret = fill_dc_plane_attributes(
9204 drm_to_adev(new_plane_crtc->dev),
9209 dc_plane_state_release(dc_new_plane_state);
9213 ret = dm_atomic_get_state(state, &dm_state);
9215 dc_plane_state_release(dc_new_plane_state);
9220 * Any atomic check errors that occur after this will
9221 * not need a release. The plane state will be attached
9222 * to the stream, and therefore part of the atomic
9223 * state. It'll be released when the atomic state is released.
9226 if (!dc_add_plane_to_context(
9228 dm_new_crtc_state->stream,
9230 dm_state->context)) {
9232 dc_plane_state_release(dc_new_plane_state);
9236 dm_new_plane_state->dc_state = dc_new_plane_state;
9238 /* Tell DC to do a full surface update every time there
9239 * is a plane change. Inefficient, but works for now.
9241 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9243 *lock_and_validation_needed = true;
9250 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9251 struct drm_crtc *crtc,
9252 struct drm_crtc_state *new_crtc_state)
9254 struct drm_plane_state *new_cursor_state, *new_primary_state;
9255 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9257 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9258 * cursor per pipe but it's going to inherit the scaling and
9259 * positioning from the underlying pipe. Check the cursor plane's
9260 * scaling matches the primary plane's. */
9262 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9263 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9264 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9268 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9269 (new_cursor_state->src_w >> 16);
9270 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9271 (new_cursor_state->src_h >> 16);
9273 primary_scale_w = new_primary_state->crtc_w * 1000 /
9274 (new_primary_state->src_w >> 16);
9275 primary_scale_h = new_primary_state->crtc_h * 1000 /
9276 (new_primary_state->src_h >> 16);
9278 if (cursor_scale_w != primary_scale_w ||
9279 cursor_scale_h != primary_scale_h) {
9280 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9287 #if defined(CONFIG_DRM_AMD_DC_DCN)
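/*
 * When a modeset touches a CRTC driving an MST connector, DSC may need to be
 * reconfigured across the whole topology, so add every affected DSC CRTC on
 * that MST manager to the atomic state.
 */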
9288 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9290 struct drm_connector *connector;
9291 struct drm_connector_state *conn_state;
9292 struct amdgpu_dm_connector *aconnector = NULL;
9294 for_each_new_connector_in_state(state, connector, conn_state, i) {
9295 if (conn_state->crtc != crtc)
9298 aconnector = to_amdgpu_dm_connector(connector);
9299 if (!aconnector->port || !aconnector->mst_port)
9308 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9313 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9314 * @dev: The DRM device
9315 * @state: The atomic state to commit
9317 * Validate that the given atomic state is programmable by DC into hardware.
9318 * This involves constructing a &struct dc_state reflecting the new hardware
9319 * state we wish to commit, then querying DC to see if it is programmable. It's
9320 * important not to modify the existing DC state. Otherwise, atomic_check
9321 * may unexpectedly commit hardware changes.
9323 * When validating the DC state, it's important that the right locks are
9324 * acquired. For the full update case, which removes/adds/updates streams on one
9325 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
9326 * that any such full update commit will wait for completion of any outstanding
9327 * flip using DRM's synchronization events.
9329 * Note that DM adds the affected connectors for all CRTCs in state, even when that
9330 * might not seem necessary. This is because DC stream creation requires the
9331 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9332 * be possible but non-trivial - a possible TODO item.
9334 * Return: 0 on success, negative error code if validation failed.
9336 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9337 struct drm_atomic_state *state)
9339 struct amdgpu_device *adev = drm_to_adev(dev);
9340 struct dm_atomic_state *dm_state = NULL;
9341 struct dc *dc = adev->dm.dc;
9342 struct drm_connector *connector;
9343 struct drm_connector_state *old_con_state, *new_con_state;
9344 struct drm_crtc *crtc;
9345 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9346 struct drm_plane *plane;
9347 struct drm_plane_state *old_plane_state, *new_plane_state;
9348 enum dc_status status;
9350 bool lock_and_validation_needed = false;
9351 struct dm_crtc_state *dm_old_crtc_state;
9353 trace_amdgpu_dm_atomic_check_begin(state);
9355 ret = drm_atomic_helper_check_modeset(dev, state);
9359 /* Check connector changes */
9360 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9361 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9362 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9364 /* Skip connectors that are disabled or part of modeset already. */
9365 if (!old_con_state->crtc && !new_con_state->crtc)
9368 if (!new_con_state->crtc)
9371 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9372 if (IS_ERR(new_crtc_state)) {
9373 ret = PTR_ERR(new_crtc_state);
9377 if (dm_old_con_state->abm_level !=
9378 dm_new_con_state->abm_level)
9379 new_crtc_state->connectors_changed = true;
9382 #if defined(CONFIG_DRM_AMD_DC_DCN)
9383 if (adev->asic_type >= CHIP_NAVI10) {
9384 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9385 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9386 ret = add_affected_mst_dsc_crtcs(state, crtc);
9393 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9394 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9396 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9397 !new_crtc_state->color_mgmt_changed &&
9398 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9399 dm_old_crtc_state->dsc_force_changed == false)
9402 if (!new_crtc_state->enable)
9405 ret = drm_atomic_add_affected_connectors(state, crtc);
9409 ret = drm_atomic_add_affected_planes(state, crtc);
9413 if (dm_old_crtc_state->dsc_force_changed)
9414 new_crtc_state->mode_changed = true;
9418 * Add all primary and overlay planes on the CRTC to the state
9419 * whenever a plane is enabled to maintain correct z-ordering
9420 * and to enable fast surface updates.
9422 drm_for_each_crtc(crtc, dev) {
9423 bool modified = false;
9425 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9426 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9429 if (new_plane_state->crtc == crtc ||
9430 old_plane_state->crtc == crtc) {
9439 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9440 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9444 drm_atomic_get_plane_state(state, plane);
9446 if (IS_ERR(new_plane_state)) {
9447 ret = PTR_ERR(new_plane_state);
9453 /* Remove existing planes if they are modified */
9454 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9455 ret = dm_update_plane_state(dc, state, plane,
9459 &lock_and_validation_needed);
9464 /* Disable all crtcs which require disable */
9465 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9466 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9470 &lock_and_validation_needed);
9475 /* Enable all crtcs which require enable */
9476 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9477 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9481 &lock_and_validation_needed);
9486 /* Add new/modified planes */
9487 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9488 ret = dm_update_plane_state(dc, state, plane,
9492 &lock_and_validation_needed);
9497 /* Run this here since we want to validate the streams we created */
9498 ret = drm_atomic_helper_check_planes(dev, state);
9502 /* Check cursor planes scaling */
9503 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9504 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9509 if (state->legacy_cursor_update) {
9511 * This is a fast cursor update coming from the plane update
9512 * helper, check if it can be done asynchronously for better performance.
9515 state->async_update =
9516 !drm_atomic_helper_async_check(dev, state);
9519 * Skip the remaining global validation if this is an async
9520 * update. Cursor updates can be done without affecting
9521 * state or bandwidth calcs and this avoids the performance
9522 * penalty of locking the private state object and
9523 * allocating a new dc_state.
9525 if (state->async_update)
9529 /* Check scaling and underscan changes */
9530 /* TODO: Scaling changes validation was removed due to the inability to commit
9531 * a new stream into the context w/o causing a full reset. Need to
9532 * decide how to handle this.
9534 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9535 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9536 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9537 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9539 /* Skip any modesets/resets */
9540 if (!acrtc || drm_atomic_crtc_needs_modeset(
9541 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9544 /* Skip anything that is not a scaling or underscan change */
9545 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9548 lock_and_validation_needed = true;
9552 * Streams and planes are reset when there are changes that affect
9553 * bandwidth. Anything that affects bandwidth needs to go through
9554 * DC global validation to ensure that the configuration can be applied to hardware.
9557 * We have to currently stall out here in atomic_check for outstanding
9558 * commits to finish in this case because our IRQ handlers reference
9559 * DRM state directly - we can end up disabling interrupts too early otherwise.
9562 * TODO: Remove this stall and drop DM state private objects.
9564 if (lock_and_validation_needed) {
9565 ret = dm_atomic_get_state(state, &dm_state);
9569 ret = do_aquire_global_lock(dev, state);
9573 #if defined(CONFIG_DRM_AMD_DC_DCN)
9574 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9577 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9583 * Perform validation of MST topology in the state:
9584 * We need to perform MST atomic check before calling
9585 * dc_validate_global_state(), or there is a chance
9586 * to get stuck in an infinite loop and hang eventually.
9588 ret = drm_dp_mst_atomic_check(state);
9591 status = dc_validate_global_state(dc, dm_state->context, false);
9592 if (status != DC_OK) {
9593 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9594 dc_status_to_str(status), status);
9600 * The commit is a fast update. Fast updates shouldn't change
9601 * the DC context, affect global validation, and can have their
9602 * commit work done in parallel with other commits not touching
9603 * the same resource. If we have a new DC context as part of
9604 * the DM atomic state from validation we need to free it and
9605 * retain the existing one instead.
9607 * Furthermore, since the DM atomic state only contains the DC
9608 * context and can safely be annulled, we can free the state
9609 * and clear the associated private object now to free
9610 * some memory and avoid a possible use-after-free later.
9613 for (i = 0; i < state->num_private_objs; i++) {
9614 struct drm_private_obj *obj = state->private_objs[i].ptr;
9616 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9617 int j = state->num_private_objs-1;
9619 dm_atomic_destroy_state(obj,
9620 state->private_objs[i].state);
9622 /* If i is not at the end of the array then the
9623 * last element needs to be moved to where i was
9624 * before the array can safely be truncated.
9627 state->private_objs[i] =
9628 state->private_objs[j];
9630 state->private_objs[j].ptr = NULL;
9631 state->private_objs[j].state = NULL;
9632 state->private_objs[j].old_state = NULL;
9633 state->private_objs[j].new_state = NULL;
9635 state->num_private_objs = j;
9641 /* Store the overall update type for use later in atomic check. */
9642 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9643 struct dm_crtc_state *dm_new_crtc_state =
9644 to_dm_crtc_state(new_crtc_state);
9646 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9651 /* Must be success */
9654 trace_amdgpu_dm_atomic_check_finish(state, ret);
9659 if (ret == -EDEADLK)
9660 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9661 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9662 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9664 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9666 trace_amdgpu_dm_atomic_check_finish(state, ret);
9671 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9672 struct amdgpu_dm_connector *amdgpu_dm_connector)
9675 bool capable = false;
9677 if (amdgpu_dm_connector->dc_link &&
9678 dm_helpers_dp_read_dpcd(
9680 amdgpu_dm_connector->dc_link,
9681 DP_DOWN_STREAM_PORT_COUNT,
9683 sizeof(dpcd_data))) {
9684 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
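/*
 * Parse the sink's EDID for a continuous-frequency monitor range descriptor
 * and update the connector's FreeSync capability and min/max refresh rates.
 */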
9689 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9693 bool edid_check_required;
9694 struct detailed_timing *timing;
9695 struct detailed_non_pixel *data;
9696 struct detailed_data_monitor_range *range;
9697 struct amdgpu_dm_connector *amdgpu_dm_connector =
9698 to_amdgpu_dm_connector(connector);
9699 struct dm_connector_state *dm_con_state = NULL;
9701 struct drm_device *dev = connector->dev;
9702 struct amdgpu_device *adev = drm_to_adev(dev);
9703 bool freesync_capable = false;
9705 if (!connector->state) {
9706 DRM_ERROR("%s - Connector has no state", __func__);
9711 dm_con_state = to_dm_connector_state(connector->state);
9713 amdgpu_dm_connector->min_vfreq = 0;
9714 amdgpu_dm_connector->max_vfreq = 0;
9715 amdgpu_dm_connector->pixel_clock_mhz = 0;
9720 dm_con_state = to_dm_connector_state(connector->state);
9722 edid_check_required = false;
9723 if (!amdgpu_dm_connector->dc_sink) {
9724 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9727 if (!adev->dm.freesync_module)
9730 * If the EDID is non-zero, restrict FreeSync only to DP and eDP displays.
9733 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9734 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9735 edid_check_required = is_dp_capable_without_timing_msa(
9737 amdgpu_dm_connector);
9740 if (edid_check_required == true && (edid->version > 1 ||
9741 (edid->version == 1 && edid->revision > 1))) {
9742 for (i = 0; i < 4; i++) {
9744 timing = &edid->detailed_timings[i];
9745 data = &timing->data.other_data;
9746 range = &data->data.range;
9748 * Check if monitor has continuous frequency mode
9750 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9753 * Check for flag range limits only. If flag == 1 then
9754 * no additional timing information provided.
9755 * Default GTF, GTF Secondary curve and CVT are not supported.
9758 if (range->flags != 1)
9761 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9762 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9763 amdgpu_dm_connector->pixel_clock_mhz =
9764 range->pixel_clock_mhz * 10;
9766 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9767 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9772 if (amdgpu_dm_connector->max_vfreq -
9773 amdgpu_dm_connector->min_vfreq > 10) {
9775 freesync_capable = true;
9781 dm_con_state->freesync_capable = freesync_capable;
9783 if (connector->vrr_capable_property)
9784 drm_connector_set_vrr_capable_property(connector,
9788 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9790 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9792 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9794 if (link->type == dc_connection_none)
9796 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9797 dpcd_data, sizeof(dpcd_data))) {
9798 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9800 if (dpcd_data[0] == 0) {
9801 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9802 link->psr_settings.psr_feature_enabled = false;
9804 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9805 link->psr_settings.psr_feature_enabled = true;
9808 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9813 * amdgpu_dm_link_setup_psr() - configure psr link
9814 * @stream: stream state
9816 * Return: true if success
9818 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9820 struct dc_link *link = NULL;
9821 struct psr_config psr_config = {0};
9822 struct psr_context psr_context = {0};
9828 link = stream->link;
9830 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9832 if (psr_config.psr_version > 0) {
9833 psr_config.psr_exit_link_training_required = 0x1;
9834 psr_config.psr_frame_capture_indication_req = 0;
9835 psr_config.psr_rfb_setup_time = 0x37;
9836 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9837 psr_config.allow_smu_optimizations = 0x0;
9839 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9842 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9848 * amdgpu_dm_psr_enable() - enable psr f/w
9849 * @stream: stream state
9851 * Return: true if success
9853 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9855 struct dc_link *link = stream->link;
9856 unsigned int vsync_rate_hz = 0;
9857 struct dc_static_screen_params params = {0};
9858 /* Calculate number of static frames before generating interrupt to the hardware.
9861 // Init fail safe of 2 frames static
9862 unsigned int num_frames_static = 2;
9864 DRM_DEBUG_DRIVER("Enabling psr...\n");
9866 vsync_rate_hz = div64_u64(div64_u64((
9867 stream->timing.pix_clk_100hz * 100),
9868 stream->timing.v_total),
9869 stream->timing.h_total);
9872 * Calculate number of frames such that at least 30 ms of time has passed.
9875 if (vsync_rate_hz != 0) {
9876 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9877 num_frames_static = (30000 / frame_time_microsec) + 1;
9880 params.triggers.cursor_update = true;
9881 params.triggers.overlay_update = true;
9882 params.triggers.surface_update = true;
9883 params.num_frames = num_frames_static;
9885 dc_stream_set_static_screen_params(link->ctx->dc,
9889 return dc_link_set_psr_allow_active(link, true, false, false);
9893 * amdgpu_dm_psr_disable() - disable psr f/w
9894 * @stream: stream state
9896 * Return: true if success
9898 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9901 DRM_DEBUG_DRIVER("Disabling psr...\n");
9903 return dc_link_set_psr_allow_active(stream->link, false, true, false);
9907 * amdgpu_dm_psr_disable_all() - disable psr f/w
9908 * if psr is enabled on any stream
9910 * Return: true if success
9912 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9914 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9915 return dc_set_psr_allow_active(dm->dc, false);
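/*
 * Re-apply the force_timing_sync setting to every stream in the current DC
 * state and trigger an OTG resynchronization.
 */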
9918 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9920 struct amdgpu_device *adev = drm_to_adev(dev);
9921 struct dc *dc = adev->dm.dc;
9924 mutex_lock(&adev->dm.dc_lock);
9925 if (dc->current_state) {
9926 for (i = 0; i < dc->current_state->stream_count; ++i)
9927 dc->current_state->streams[i]
9928 ->triggered_crtc_reset.enabled =
9929 adev->dm.force_timing_sync;
9931 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9932 dc_trigger_sync(dc, dc->current_state);
9934 mutex_unlock(&adev->dm.dc_lock);
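/* Register write helper used by DC; traces every register write. */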
9937 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9938 uint32_t value, const char *func_name)
9940 #ifdef DM_CHECK_ADDR_0
9942 DC_ERR("invalid register write. address = 0");
9946 cgs_write_register(ctx->cgs_device, address, value);
9947 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9950 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9951 const char *func_name)
9954 #ifdef DM_CHECK_ADDR_0
9956 DC_ERR("invalid register read; address = 0\n");
9961 if (ctx->dmub_srv &&
9962 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9963 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9968 value = cgs_read_register(ctx->cgs_device, address);
9970 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);