/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * while the system is idle but the display is on, as it eliminates display
 * refresh requests to DDR memory completely as long as the frame buffer for
 * that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

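/*
 * A rough sketch of the expected call flow, for orientation only: the exact
 * call sites live in the modeset and frontbuffer tracking code elsewhere in
 * i915, so treat this as an illustration rather than a contract.
 *
 *	intel_psr_compute_config(intel_dp, crtc_state);       (atomic check)
 *	intel_psr_enable(intel_dp, crtc_state);               (after pipe is up)
 *	intel_psr_invalidate(i915, frontbuffer_bits, origin); (rendering starts)
 *	intel_psr_flush(i915, frontbuffer_bits, origin);      (rendering flushed)
 *	intel_psr_disable(intel_dp, old_crtc_state);          (before pipe down)
 */
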
static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

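/*
 * Note: dev_priv->psr.debug is normally driven from debugfs. Assuming the
 * usual i915 debugfs layout, forcing PSR1 looks something like the following
 * (0x3 being I915_PSR_DEBUG_FORCE_PSR1, with the IRQ debug bit left unset):
 *
 *	# echo 0x3 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */
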
static void psr_irq_control(struct drm_i915_private *dev_priv)
{
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	u32 mask, val;

	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = I915_READ(imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	I915_WRITE(imr_reg, val);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
	} else {
		trans_shift = dev_priv->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		dev_priv->psr.last_entry_attempt = time_ns;
		DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
			      transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		dev_priv->psr.last_exit = time_ns;
		DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
			      transcoder_name(cpu_transcoder));

		if (INTEL_GEN(dev_priv) >= 9) {
			u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = dev_priv->psr.psr2_enabled;

			I915_WRITE(PSR_EVENT(cpu_transcoder), val);
			psr_event_print(val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		DRM_WARN("[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		dev_priv->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or clearing
		 * irq_aux_error.
		 */
		val = I915_READ(imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		I915_WRITE(imr_reg, val);

		schedule_work(&dev_priv->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;

	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");

	return val;
}

static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Returning the default X granularity if granularity not required or
	 * if DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

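/*
 * Illustration (hypothetical panel values): DP_PSR2_SU_X_GRANULARITY is a
 * two-byte DPCD field, so a panel reporting 0x04, 0x00 yields an X
 * granularity of 4 pixels; the mode's hdisplay must then be a multiple of 4
 * (see the check in intel_psr2_config_valid()). A read of 0 falls back to
 * the same default of 4.
 */
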
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	if (dev_priv->psr.dp) {
		DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
		return;
	}

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h, and PSR version 03h panels
		 * without the Y-coordinate requirement, we would need to
		 * enable the sink's AUX frame sync.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granularity(intel_dp);
		}
	}
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct dp_sdp psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

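/*
 * For reference (DP spec background, not something this file enforces): in
 * the headers above HB1 is the SDP type (0x07 = VSC), HB2 is the VSC SDP
 * revision (2 = PSR1, 4 = PSR2, 5 = PSR2 + Pixel Encoding/Colorimetry) and
 * HB3 encodes the number of valid data bytes (0x8, 0xe and 0x13
 * respectively).
 */
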
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

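/*
 * Worked example (made-up VBT numbers): with tp1_wakeup_time_us = 200 and
 * tp2_tp3_wakeup_time_us = 0, the selection above rounds the TP1 time up to
 * the next supported bucket (500us) and programs 0us for TP2/TP3. The
 * hardware buckets are 0/100/500/2500us; anything in between rounds up.
 */
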
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means source has to wait for more than 8
	 * frames, we'll go with 9 frames for now
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}

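/*
 * Numeric illustration (hypothetical panel): with VBT idle_frames = 0 and a
 * reported sink_sync_latency of 8, the code above programs
 * max(max(6, 0), 8 + 1) = 9 idle frames before the HW enters self-refresh.
 */
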
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);

	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->base.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->base.adjusted_mode));
}

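/*
 * Example: for a 60Hz mode this returns DIV_ROUND_UP(1000000, 60) = 16667us,
 * i.e. roughly one frame period, which feeds the DC3CO exit delay used by
 * tgl_dc3co_flush() below.
 */
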
static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
				     int idle_frames)
{
	u32 val;

	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
	val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
{
	psr2_program_idle_frames(dev_priv, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
	int idle_frames;

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	/*
	 * Restore the PSR2 idle frame count: let's use 6 as the minimum to
	 * cover all known cases including the off-by-one issue that HW has
	 * in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	psr2_program_idle_frames(dev_priv, idle_frames);
}

static void tgl_dc5_idle_thread(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.idle_work.work);

	mutex_lock(&dev_priv->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&dev_priv->psr.idle_work))
		goto unlock;

	DRM_DEBUG_KMS("DC5/6 idle thread\n");
	tgl_psr2_disable_dc3co(dev_priv);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->psr.dc3co_enabled)
		return;

	cancel_delayed_work(&dev_priv->psr.idle_work);
	/* Before PSR2 exit, disallow DC3CO */
	tgl_psr2_disable_dc3co(dev_priv);
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
			      transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate the SU block width is a multiple of
	 * su_x_granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but for now only one instance of PSR is supported, so let's keep it
	 * hardcoded to PORT_A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

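/*
 * Back-of-the-envelope example for the setup-time check above (numbers are
 * illustrative, not from Bspec): a 1920x1080@60 mode with crtc_vtotal = 1125
 * has a line time of ~14.8us, so the usable vblank budget is
 * (1125 - 1080 - 1) * 14.8us ~= 651us, comfortably above a typical 330us
 * DPCD PSR setup time. A panel reporting a setup time longer than the budget
 * keeps PSR off for that mode.
 */
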
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
		WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);

	WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per Spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);

	psr_irq_control(dev_priv);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;
	u32 val;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
	dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling in this situation causes the screen to freeze the first
	 * time that the PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = I915_READ(EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
	}
	if (val) {
		dev_priv->psr.sink_not_reliable = true;
		DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
		return;
	}

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
			val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
			WARN_ON(val & EDP_PSR2_ENABLE);
		}

		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		WARN_ON(val & EDP_PSR_ENABLE);

		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(dev_priv);
		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		val &= ~EDP_PSR2_ENABLE;
		I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
	} else {
		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		WARN_ON(!(val & EDP_PSR_ENABLE));
		val &= ~EDP_PSR_ENABLE;
		I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
	}
	dev_priv->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		DRM_ERROR("Timed out waiting for PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
	cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently active
		 * pipe.
		 */
		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(dev_priv);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update the PSR state, disabling, enabling or switching
 * PSR version when executing fastsets. For full modesets, intel_psr_disable()
 * and intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC to avoid CRC timeouts.
			 */
			if (!dev_priv->psr.active &&
			    !dev_priv->psr.busy_frontbuffer_bits)
				schedule_work(&dev_priv->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */
	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(dev_priv->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

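/*
 * Sanity check on the 50ms bound above (illustrative numbers): at 60Hz the
 * bspec formula gives ~16.7ms + 6ms + 1.5ms ~= 24.2ms, so the 50ms timeout
 * is roughly a 2x safety margin, and it still covers refresh rates down to
 * about 24Hz (~41.7ms + 7.5ms ~= 49.2ms).
 */
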
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;
		struct intel_crtc_state *intel_crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		intel_crtc_state = to_intel_crtc_state(crtc_state);

		if (crtc_state->active && intel_crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (dev_priv->psr.enabled)
		psr_irq_control(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/*
 * When we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush PSR for ORIGIN_FLIP
 * events as well, so tgl_dc3co_flush() will need to be changed
 * accordingly.
 */
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
		unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	u32 delay;

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.dc3co_enabled)
		goto unlock;

	if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
		goto unlock;

	/*
	 * Every frontbuffer flush caused by a flip pushes back the delayed
	 * work; when the delayed work finally runs, it means the display has
	 * been idle for the whole delay.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(dev_priv);
	/* DC5/DC6 required idle frames = 6 */
	delay = 6 * dev_priv->psr.dc3co_exit_delay;
	mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
			 usecs_to_jiffies(delay));

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

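/*
 * Concrete example of the rescheduling above (hypothetical 60Hz panel):
 * dc3co_exit_delay = intel_get_frame_time_us() ~= 16667us, so the idle work
 * is pushed out by 6 * 16667us ~= 100ms after every flip; only once no flip
 * has arrived for ~100ms does tgl_dc5_idle_thread() drop back out of DC3CO.
 */
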
/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP) {
		tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR state.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	if (!dev_priv->psr.sink_support)
		return;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have its PSR registers in the same space as the
		 * transcoder, so set this to a value that, when subtracted
		 * from the register in transcoder space, results in the right
		 * offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (INTEL_GEN(dev_priv) < 12)
		/* For new platforms up to TGL let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
	mutex_init(&dev_priv->psr.lock);
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}