2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include <drm/drm_atomic_helper.h>
26 #include "display/intel_dp.h"
29 #include "intel_atomic.h"
30 #include "intel_display_types.h"
31 #include "intel_dp_aux.h"
32 #include "intel_hdmi.h"
33 #include "intel_psr.h"
34 #include "intel_sprite.h"
35 #include "skl_universal_plane.h"
38 * DOC: Panel Self Refresh (PSR/SRD)
40 * Since Haswell the display controller supports Panel Self-Refresh on display
41 * panels which have a remote frame buffer (RFB) implemented according to the PSR
42 * spec in eDP 1.3. The PSR feature allows the display to go to lower standby states
43 * when the system is idle but the display is on, as it eliminates display refresh
44 * requests to DDR memory completely as long as the frame buffer for that
45 * display is unchanged.
47 * Panel Self Refresh must be supported by both Hardware (source) and
50 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
51 * to power down the link and memory controller. For DSI panels the same idea
52 * is called "manual mode".
54 * The implementation uses the hardware-based PSR support which automatically
55 * enters/exits self-refresh mode. The hardware takes care of sending the
56 * required DP aux message and could even retrain the link (that part isn't
57 * enabled yet though). The hardware also keeps track of any frontbuffer
58 * changes to know when to exit self-refresh mode again. Unfortunately that
59 * part doesn't work too well, which is why the i915 PSR support uses the
60 * software frontbuffer tracking to make sure it doesn't miss a screen
61 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
62 * get called by the frontbuffer tracking code. Note that because of locking
63 * issues the self-refresh re-enable code is done from a work queue, which
64 * must be correctly synchronized/cancelled when shutting down the pipe.
66 * DC3CO (DC3 clock off)
68 * On top of PSR2, GEN12 adds an intermediate power-savings state that turns
69 * the clock off automatically during the PSR2 idle state.
70 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
71 * entry/exit allows the HW to enter a low-power state even when page flipping
72 * periodically (for instance a 30fps video playback scenario).
74 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in it),
75 * DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
76 * frames. If no other flip occurs and the work runs, DC3CO is
77 * disabled and PSR2 is configured to enter deep sleep, resetting again in case of another flip.
79 * Front buffer modifications do not trigger DC3CO activation on purpose as it
80 * would bring a lot of complexity, and most modern systems will only use page flips.
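 *
 * Illustrative sketch (not part of the driver flow): how the software
 * frontbuffer tracking described above is expected to bracket a CPU
 * rendering path. intel_psr_invalidate() and intel_psr_flush() are the
 * real entry points defined later in this file; the surrounding call
 * site is hypothetical.
 *
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU renders into the frontbuffer ...
 *	intel_psr_flush(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *
 * The invalidate call forces a PSR exit and records the dirty bits in
 * busy_frontbuffer_bits; the flush clears them and schedules psr.work to
 * re-activate PSR once the HW reports idle. ORIGIN_FLIP flushes are
 * routed to the DC3CO logic (tgl_dc3co_flush()) instead.
 */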
84 static bool psr_global_enabled(struct intel_dp *intel_dp)
86 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
88 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
89 case I915_PSR_DEBUG_DEFAULT:
90 return i915->params.enable_psr;
91 case I915_PSR_DEBUG_DISABLE:
98 static bool psr2_global_enabled(struct intel_dp *intel_dp)
100 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
101 case I915_PSR_DEBUG_DISABLE:
102 case I915_PSR_DEBUG_FORCE_PSR1:
109 static void psr_irq_control(struct intel_dp *intel_dp)
111 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
112 enum transcoder trans_shift;
117 * gen12+ has one PSR interrupt register per transcoder, all of them
118 * using the same bit definitions: handle it as TRANSCODER_EDP to force
119 * a 0 shift in the bit definitions
121 if (DISPLAY_VER(dev_priv) >= 12) {
123 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
125 trans_shift = intel_dp->psr.transcoder;
126 imr_reg = EDP_PSR_IMR;
129 mask = EDP_PSR_ERROR(trans_shift);
130 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
131 mask |= EDP_PSR_POST_EXIT(trans_shift) |
132 EDP_PSR_PRE_ENTRY(trans_shift);
134 /* Warning: it is masking/setting reserved bits too */
135 val = intel_de_read(dev_priv, imr_reg);
136 val &= ~EDP_PSR_TRANS_MASK(trans_shift);
138 intel_de_write(dev_priv, imr_reg, val);
141 static void psr_event_print(struct drm_i915_private *i915,
142 u32 val, bool psr2_enabled)
144 drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
145 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
146 drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
147 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
148 drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
149 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
150 drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
151 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
152 drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
153 if (val & PSR_EVENT_GRAPHICS_RESET)
154 drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
155 if (val & PSR_EVENT_PCH_INTERRUPT)
156 drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
157 if (val & PSR_EVENT_MEMORY_UP)
158 drm_dbg_kms(&i915->drm, "\tMemory up\n");
159 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
160 drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
161 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
162 drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
163 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
164 drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
165 if (val & PSR_EVENT_REGISTER_UPDATE)
166 drm_dbg_kms(&i915->drm, "\tRegister updated\n");
167 if (val & PSR_EVENT_HDCP_ENABLE)
168 drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
169 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
170 drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
171 if (val & PSR_EVENT_VBI_ENABLE)
172 drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
173 if (val & PSR_EVENT_LPSP_MODE_EXIT)
174 drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
175 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
176 drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
179 void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
181 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
182 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
183 ktime_t time_ns = ktime_get();
184 enum transcoder trans_shift;
187 if (DISPLAY_VER(dev_priv) >= 12) {
189 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
191 trans_shift = intel_dp->psr.transcoder;
192 imr_reg = EDP_PSR_IMR;
195 if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
196 intel_dp->psr.last_entry_attempt = time_ns;
197 drm_dbg_kms(&dev_priv->drm,
198 "[transcoder %s] PSR entry attempt in 2 vblanks\n",
199 transcoder_name(cpu_transcoder));
202 if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
203 intel_dp->psr.last_exit = time_ns;
204 drm_dbg_kms(&dev_priv->drm,
205 "[transcoder %s] PSR exit completed\n",
206 transcoder_name(cpu_transcoder));
208 if (DISPLAY_VER(dev_priv) >= 9) {
209 u32 val = intel_de_read(dev_priv,
210 PSR_EVENT(cpu_transcoder));
211 bool psr2_enabled = intel_dp->psr.psr2_enabled;
213 intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
215 psr_event_print(dev_priv, val, psr2_enabled);
219 if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
222 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
223 transcoder_name(cpu_transcoder));
225 intel_dp->psr.irq_aux_error = true;
228 * If this interrupt is not masked it will keep
229 * firing so fast that it prevents the scheduled work from running.
231 * Also, after a PSR error we don't want to arm PSR
232 * again, so we don't care about unmasking the interrupt
233 * or clearing irq_aux_error.
235 val = intel_de_read(dev_priv, imr_reg);
236 val |= EDP_PSR_ERROR(trans_shift);
237 intel_de_write(dev_priv, imr_reg, val);
239 schedule_work(&intel_dp->psr.work);
243 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
247 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
250 return alpm_caps & DP_ALPM_CAP;
253 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
255 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
256 u8 val = 8; /* assume the worst if we can't read the value */
258 if (drm_dp_dpcd_readb(&intel_dp->aux,
259 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
260 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
262 drm_dbg_kms(&i915->drm,
263 "Unable to get sink synchronization latency, assuming 8 frames\n");
267 static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
269 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
274 * Return the default X granularity if granularity is not required or
277 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
280 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
282 drm_dbg_kms(&i915->drm,
283 "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
286 * Spec says that if the value read is 0 the default granularity should
289 if (r != 2 || val == 0)
295 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
297 struct drm_i915_private *dev_priv =
298 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
300 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
301 sizeof(intel_dp->psr_dpcd));
303 if (!intel_dp->psr_dpcd[0])
305 drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
306 intel_dp->psr_dpcd[0]);
308 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
309 drm_dbg_kms(&dev_priv->drm,
310 "PSR support not currently available for this panel\n");
314 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
315 drm_dbg_kms(&dev_priv->drm,
316 "Panel lacks power state control, PSR cannot be enabled\n");
320 intel_dp->psr.sink_support = true;
321 intel_dp->psr.sink_sync_latency =
322 intel_dp_get_sink_sync_latency(intel_dp);
324 if (DISPLAY_VER(dev_priv) >= 9 &&
325 (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
326 bool y_req = intel_dp->psr_dpcd[1] &
327 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
328 bool alpm = intel_dp_get_alpm_status(intel_dp);
331 * All panels that support PSR version 03h (PSR2 +
332 * Y-coordinate) can handle Y-coordinates in the VSC, but we are
333 * only sure that it is going to be used when required by the
334 * panel. This way the panel is capable of doing selective updates
335 * without an AUX frame sync.
337 * To support PSR version 02h and PSR version 03h without
338 * Y-coordinate requirement panels we would need to enable
341 intel_dp->psr.sink_psr2_support = y_req && alpm;
342 drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
343 intel_dp->psr.sink_psr2_support ? "" : "not ");
345 if (intel_dp->psr.sink_psr2_support) {
346 intel_dp->psr.colorimetry_support =
347 intel_dp_get_colorimetry_status(intel_dp);
348 intel_dp->psr.su_x_granularity =
349 intel_dp_get_su_x_granularity(intel_dp);
354 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
356 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
357 u32 aux_clock_divider, aux_ctl;
359 static const u8 aux_msg[] = {
360 [0] = DP_AUX_NATIVE_WRITE << 4,
361 [1] = DP_SET_POWER >> 8,
362 [2] = DP_SET_POWER & 0xff,
364 [4] = DP_SET_POWER_D0,
366 u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
367 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
368 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
369 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
371 BUILD_BUG_ON(sizeof(aux_msg) > 20);
372 for (i = 0; i < sizeof(aux_msg); i += 4)
373 intel_de_write(dev_priv,
374 EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
375 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
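/*
 * Worked example of the packing above (illustrative arithmetic): aux_msg
 * is a native AUX write header addressed to DP_SET_POWER (0x600) with a
 * single data byte, DP_SET_POWER_D0 (0x01). intel_dp_pack_aux() packs
 * bytes MSB-first, so the first data register receives 0x800600.. (0x80
 * being DP_AUX_NATIVE_WRITE << 4, then the address bytes 0x06 0x00 and
 * the length field) and the second receives 0x01000000 for the lone
 * data byte.
 */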
377 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
379 /* Start with bits set for DDI_AUX_CTL register */
380 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
383 /* Select only valid bits for SRD_AUX_CTL */
384 aux_ctl &= psr_aux_mask;
385 intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
389 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
391 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
392 u8 dpcd_val = DP_PSR_ENABLE;
394 /* Enable ALPM at the sink for PSR2 */
395 if (intel_dp->psr.psr2_enabled) {
396 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
398 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
400 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
402 if (intel_dp->psr.link_standby)
403 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
405 if (DISPLAY_VER(dev_priv) >= 8)
406 dpcd_val |= DP_PSR_CRC_VERIFICATION;
409 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
411 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
414 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
416 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
419 if (DISPLAY_VER(dev_priv) >= 11)
420 val |= EDP_PSR_TP4_TIME_0US;
422 if (dev_priv->params.psr_safest_params) {
423 val |= EDP_PSR_TP1_TIME_2500us;
424 val |= EDP_PSR_TP2_TP3_TIME_2500us;
428 if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
429 val |= EDP_PSR_TP1_TIME_0us;
430 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
431 val |= EDP_PSR_TP1_TIME_100us;
432 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
433 val |= EDP_PSR_TP1_TIME_500us;
435 val |= EDP_PSR_TP1_TIME_2500us;
437 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
438 val |= EDP_PSR_TP2_TP3_TIME_0us;
439 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
440 val |= EDP_PSR_TP2_TP3_TIME_100us;
441 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
442 val |= EDP_PSR_TP2_TP3_TIME_500us;
444 val |= EDP_PSR_TP2_TP3_TIME_2500us;
447 if (intel_dp_source_supports_hbr2(intel_dp) &&
448 drm_dp_tps3_supported(intel_dp->dpcd))
449 val |= EDP_PSR_TP1_TP3_SEL;
451 val |= EDP_PSR_TP1_TP2_SEL;
456 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
458 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
461 /* Let's use 6 as the minimum to cover all known cases including the
462 * off-by-one issue that HW has in some cases.
464 idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
465 idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
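/*
 * Worked example (illustrative numbers): with a VBT value of 2 idle
 * frames and a sink_sync_latency of 7, the first max() raises 2 to the
 * minimum of 6 and the second raises it to 7 + 1 = 8 idle frames.
 */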
467 if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
473 static void hsw_activate_psr1(struct intel_dp *intel_dp)
475 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
476 u32 max_sleep_time = 0x1f;
477 u32 val = EDP_PSR_ENABLE;
479 val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
481 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
482 if (IS_HASWELL(dev_priv))
483 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
485 if (intel_dp->psr.link_standby)
486 val |= EDP_PSR_LINK_STANDBY;
488 val |= intel_psr1_get_tp_time(intel_dp);
490 if (DISPLAY_VER(dev_priv) >= 8)
491 val |= EDP_PSR_CRC_ENABLE;
493 val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
494 EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
495 intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
498 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
500 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
503 if (dev_priv->params.psr_safest_params)
504 return EDP_PSR2_TP2_TIME_2500us;
506 if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
507 dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
508 val |= EDP_PSR2_TP2_TIME_50us;
509 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
510 val |= EDP_PSR2_TP2_TIME_100us;
511 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
512 val |= EDP_PSR2_TP2_TIME_500us;
514 val |= EDP_PSR2_TP2_TIME_2500us;
519 static void hsw_activate_psr2(struct intel_dp *intel_dp)
521 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
524 val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
526 val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
527 if (DISPLAY_VER(dev_priv) >= 10)
528 val |= EDP_Y_COORDINATE_ENABLE;
530 val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
531 val |= intel_psr2_get_tp_time(intel_dp);
533 if (DISPLAY_VER(dev_priv) >= 12) {
535 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the default
536 * values from BSpec. For optimal power
537 * consumption, modes below 4k resolution need to decrease
538 * IO_BUFFER_WAKE and FAST_WAKE, and modes above 4k resolution
539 * need to increase IO_BUFFER_WAKE and FAST_WAKE.
541 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
542 val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
543 val |= TGL_EDP_PSR2_FAST_WAKE(7);
544 } else if (DISPLAY_VER(dev_priv) >= 9) {
545 val |= EDP_PSR2_IO_BUFFER_WAKE(7);
546 val |= EDP_PSR2_FAST_WAKE(7);
549 if (intel_dp->psr.psr2_sel_fetch_enabled) {
551 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
552 IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
553 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
554 DIS_RAM_BYPASS_PSR2_MAN_TRACK,
555 DIS_RAM_BYPASS_PSR2_MAN_TRACK);
557 intel_de_write(dev_priv,
558 PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
559 PSR2_MAN_TRK_CTL_ENABLE);
560 } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
561 intel_de_write(dev_priv,
562 PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
566 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
567 * recommends keeping this bit unset while PSR2 is enabled.
569 intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
571 intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
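/*
 * Hypothetical sketch of the TODO above: derive the wake-line counts
 * from the mode instead of hardcoding 7. The helper below and its
 * thresholds are illustrative assumptions, not values from BSpec, and
 * it assumes the crtc_state were plumbed into hsw_activate_psr2():
 *
 *	static u8 psr2_wake_lines(const struct intel_crtc_state *crtc_state)
 *	{
 *		int hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
 *
 *		if (hdisplay > 3840)	// above 4k: more wake lines
 *			return 9;
 *		if (hdisplay < 3840)	// below 4k: fewer wake lines
 *			return 5;
 *		return 7;		// the BSpec default used today
 *	}
 *
 * val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(psr2_wake_lines(crtc_state)) would
 * then replace the hardcoded values.
 */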
575 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
577 if (DISPLAY_VER(dev_priv) < 9)
579 else if (DISPLAY_VER(dev_priv) >= 12)
580 return trans == TRANSCODER_A;
582 return trans == TRANSCODER_EDP;
585 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
587 if (!cstate || !cstate->hw.active)
590 return DIV_ROUND_UP(1000 * 1000,
591 drm_mode_vrefresh(&cstate->hw.adjusted_mode));
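/*
 * For example (illustrative): a 60 Hz mode yields
 * DIV_ROUND_UP(1000 * 1000, 60) = 16667 us per frame, so the 6-frame
 * DC5/DC6 requirement used for psr.dc3co_exit_delay below comes to
 * roughly 100 ms.
 */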
594 static void psr2_program_idle_frames(struct intel_dp *intel_dp,
597 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
600 idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
601 val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
602 val &= ~EDP_PSR2_IDLE_FRAME_MASK;
604 intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
607 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
609 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
611 psr2_program_idle_frames(intel_dp, 0);
612 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
615 static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
617 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
619 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
620 psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
623 static void tgl_dc3co_disable_work(struct work_struct *work)
625 struct intel_dp *intel_dp =
626 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
628 mutex_lock(&intel_dp->psr.lock);
629 /* If the delayed work is still pending, the display is not idle */
630 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
633 tgl_psr2_disable_dc3co(intel_dp);
635 mutex_unlock(&intel_dp->psr.lock);
638 static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
640 if (!intel_dp->psr.dc3co_enabled)
643 cancel_delayed_work(&intel_dp->psr.dc3co_work);
644 /* Before PSR2 exit, disallow DC3CO */
645 tgl_psr2_disable_dc3co(intel_dp);
649 tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
650 struct intel_crtc_state *crtc_state)
652 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
653 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
654 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
658 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
659 * TODO: when the issue is addressed, this restriction should be removed.
661 if (crtc_state->enable_psr2_sel_fetch)
664 if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
667 /* Bspec: 49196 DC3CO only works with pipe A and DDI A. */
668 if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
669 dig_port->base.port != PORT_A)
673 * DC3CO exit time is 200us per Bspec 49196:
674 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
677 intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
679 if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
682 crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
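/*
 * Worked example (illustrative, typical CEA 1080p60 timing: 148.5 MHz
 * pixel clock, 2200x1125 total): line time = 2200 / 148500 kHz ~= 14.8 us,
 * so exit_scanlines = ROUNDUP(200 / 14.8) + 1 = 15 and
 * dc3co_exitline = 1080 - 15 = 1065.
 */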
685 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
686 struct intel_crtc_state *crtc_state)
688 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
689 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
690 struct intel_plane_state *plane_state;
691 struct intel_plane *plane;
694 if (!dev_priv->params.enable_psr2_sel_fetch &&
695 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
696 drm_dbg_kms(&dev_priv->drm,
697 "PSR2 sel fetch not enabled, disabled by parameter\n");
701 if (crtc_state->uapi.async_flip) {
702 drm_dbg_kms(&dev_priv->drm,
703 "PSR2 sel fetch not enabled, async flip enabled\n");
707 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
708 if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
709 drm_dbg_kms(&dev_priv->drm,
710 "PSR2 sel fetch not enabled, plane rotated\n");
715 return crtc_state->enable_psr2_sel_fetch = true;
718 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
719 struct intel_crtc_state *crtc_state)
721 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
722 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
723 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
724 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
726 if (!intel_dp->psr.sink_psr2_support)
729 /* JSL and EHL only support eDP 1.3 */
730 if (IS_JSL_EHL(dev_priv)) {
731 drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
735 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
736 drm_dbg_kms(&dev_priv->drm,
737 "PSR2 not supported in transcoder %s\n",
738 transcoder_name(crtc_state->cpu_transcoder));
742 if (!psr2_global_enabled(intel_dp)) {
743 drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
748 * DSC and PSR2 cannot be enabled simultaneously. If a requested
749 * resolution requires DSC to be enabled, priority is given to DSC
752 if (crtc_state->dsc.compression_enable) {
753 drm_dbg_kms(&dev_priv->drm,
754 "PSR2 cannot be enabled since DSC is enabled\n");
758 if (crtc_state->crc_enabled) {
759 drm_dbg_kms(&dev_priv->drm,
760 "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
764 if (DISPLAY_VER(dev_priv) >= 12) {
768 } else if (DISPLAY_VER(dev_priv) >= 10) {
772 } else if (IS_DISPLAY_VER(dev_priv, 9)) {
778 if (crtc_state->pipe_bpp > max_bpp) {
779 drm_dbg_kms(&dev_priv->drm,
780 "PSR2 not enabled, pipe bpp %d > max supported %d\n",
781 crtc_state->pipe_bpp, max_bpp);
786 * HW sends SU blocks of size four scan lines, which means the starting
787 * X coordinate and Y granularity requirements will always be met. We
788 * only need to validate that the SU block width is a multiple of
791 if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
792 drm_dbg_kms(&dev_priv->drm,
793 "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
794 crtc_hdisplay, intel_dp->psr.su_x_granularity);
798 if (HAS_PSR2_SEL_FETCH(dev_priv)) {
799 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
800 !HAS_PSR_HW_TRACKING(dev_priv)) {
801 drm_dbg_kms(&dev_priv->drm,
802 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
807 if (!crtc_state->enable_psr2_sel_fetch &&
808 (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
809 drm_dbg_kms(&dev_priv->drm,
810 "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
811 crtc_hdisplay, crtc_vdisplay,
812 psr_max_h, psr_max_v);
816 tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
820 void intel_psr_compute_config(struct intel_dp *intel_dp,
821 struct intel_crtc_state *crtc_state)
823 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
824 const struct drm_display_mode *adjusted_mode =
825 &crtc_state->hw.adjusted_mode;
829 * Current PSR panels don't work reliably with VRR enabled,
830 * so if VRR is enabled, do not enable PSR.
832 if (crtc_state->vrr.enable)
835 if (!CAN_PSR(intel_dp))
838 if (!psr_global_enabled(intel_dp)) {
839 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
843 if (intel_dp->psr.sink_not_reliable) {
844 drm_dbg_kms(&dev_priv->drm,
845 "PSR sink implementation is not reliable\n");
849 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
850 drm_dbg_kms(&dev_priv->drm,
851 "PSR condition failed: Interlaced mode enabled\n");
855 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
856 if (psr_setup_time < 0) {
857 drm_dbg_kms(&dev_priv->drm,
858 "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
859 intel_dp->psr_dpcd[1]);
863 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
864 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
865 drm_dbg_kms(&dev_priv->drm,
866 "PSR condition failed: PSR setup time (%d us) too long\n",
871 crtc_state->has_psr = true;
872 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
873 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
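/*
 * Worked example for the setup-time check above (illustrative): a sink
 * reporting the worst-case 330 us PSR setup time on a typical CEA
 * 1080p60 timing (line time ~14.8 us) needs ceil(330 / 14.8) = 23
 * scanlines, which fits in the vblank budget of
 * crtc_vtotal - crtc_vdisplay - 1 = 1125 - 1080 - 1 = 44 lines.
 */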
876 static void intel_psr_activate(struct intel_dp *intel_dp)
878 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
879 enum transcoder transcoder = intel_dp->psr.transcoder;
881 if (transcoder_has_psr2(dev_priv, transcoder))
882 drm_WARN_ON(&dev_priv->drm,
883 intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
885 drm_WARN_ON(&dev_priv->drm,
886 intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
887 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
888 lockdep_assert_held(&intel_dp->psr.lock);
890 /* PSR1 and PSR2 are mutually exclusive. */
891 if (intel_dp->psr.psr2_enabled)
892 hsw_activate_psr2(intel_dp);
894 hsw_activate_psr1(intel_dp);
896 intel_dp->psr.active = true;
899 static void intel_psr_enable_source(struct intel_dp *intel_dp,
900 const struct intel_crtc_state *crtc_state)
902 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
903 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
906 /* Only HSW and BDW have PSR AUX registers that need to be set up.
907 * SKL+ use hardcoded values for PSR AUX transactions.
909 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
910 hsw_psr_setup_aux(intel_dp);
912 if (intel_dp->psr.psr2_enabled && IS_DISPLAY_VER(dev_priv, 9)) {
913 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
914 u32 chicken = intel_de_read(dev_priv, reg);
916 chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
917 PSR2_ADD_VERTICAL_LINE_COUNT;
918 intel_de_write(dev_priv, reg, chicken);
922 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
923 * mask LPSP to avoid a dependency on other drivers that might block
924 * runtime_pm, besides preventing other HW tracking issues, now that we
925 * can rely on frontbuffer tracking.
927 mask = EDP_PSR_DEBUG_MASK_MEMUP |
928 EDP_PSR_DEBUG_MASK_HPD |
929 EDP_PSR_DEBUG_MASK_LPSP |
930 EDP_PSR_DEBUG_MASK_MAX_SLEEP;
932 if (DISPLAY_VER(dev_priv) < 11)
933 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
935 intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
938 psr_irq_control(intel_dp);
940 if (crtc_state->dc3co_exitline) {
944 * TODO: if future platforms support DC3CO in more than one
945 * transcoder, EXITLINE will need to be unset when disabling PSR
947 val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
948 val &= ~EXITLINE_MASK;
949 val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
950 val |= EXITLINE_ENABLE;
951 intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
954 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
955 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
956 intel_dp->psr.psr2_sel_fetch_enabled ?
957 IGNORE_PSR2_HW_TRACKING : 0);
960 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
961 const struct intel_crtc_state *crtc_state,
962 const struct drm_connector_state *conn_state)
964 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
965 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
966 struct intel_encoder *encoder = &dig_port->base;
969 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
971 intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
972 intel_dp->psr.busy_frontbuffer_bits = 0;
973 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
974 intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
975 intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
976 /* DC5/DC6 requires at least 6 idle frames */
977 val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
978 intel_dp->psr.dc3co_exit_delay = val;
979 intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
982 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
983 * will still keep the error set even after the reset done in the
984 * irq_preinstall and irq_uninstall hooks.
985 * Enabling PSR in this situation causes the screen to freeze the
986 * first time that the PSR HW tries to activate, so let's keep PSR
987 * disabled to avoid any rendering problems.
989 if (DISPLAY_VER(dev_priv) >= 12) {
990 val = intel_de_read(dev_priv,
991 TRANS_PSR_IIR(intel_dp->psr.transcoder));
992 val &= EDP_PSR_ERROR(0);
994 val = intel_de_read(dev_priv, EDP_PSR_IIR);
995 val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
998 intel_dp->psr.sink_not_reliable = true;
999 drm_dbg_kms(&dev_priv->drm,
1000 "PSR interruption error set, not enabling PSR\n");
1004 drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1005 intel_dp->psr.psr2_enabled ? "2" : "1");
1006 intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1007 &intel_dp->psr.vsc);
1008 intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
1009 intel_psr_enable_sink(intel_dp);
1010 intel_psr_enable_source(intel_dp, crtc_state);
1011 intel_dp->psr.enabled = true;
1013 intel_psr_activate(intel_dp);
1017 * intel_psr_enable - Enable PSR
1018 * @intel_dp: Intel DP
1019 * @crtc_state: new CRTC state
1020 * @conn_state: new CONNECTOR state
1022 * This function can only be called after the pipe is fully trained and enabled.
1024 void intel_psr_enable(struct intel_dp *intel_dp,
1025 const struct intel_crtc_state *crtc_state,
1026 const struct drm_connector_state *conn_state)
1028 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1030 if (!CAN_PSR(intel_dp))
1033 if (!crtc_state->has_psr)
1036 drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
1038 mutex_lock(&intel_dp->psr.lock);
1039 intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
1040 mutex_unlock(&intel_dp->psr.lock);
1043 static void intel_psr_exit(struct intel_dp *intel_dp)
1045 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1048 if (!intel_dp->psr.active) {
1049 if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1050 val = intel_de_read(dev_priv,
1051 EDP_PSR2_CTL(intel_dp->psr.transcoder));
1052 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1055 val = intel_de_read(dev_priv,
1056 EDP_PSR_CTL(intel_dp->psr.transcoder));
1057 drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1062 if (intel_dp->psr.psr2_enabled) {
1063 tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1064 val = intel_de_read(dev_priv,
1065 EDP_PSR2_CTL(intel_dp->psr.transcoder));
1066 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1067 val &= ~EDP_PSR2_ENABLE;
1068 intel_de_write(dev_priv,
1069 EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1071 val = intel_de_read(dev_priv,
1072 EDP_PSR_CTL(intel_dp->psr.transcoder));
1073 drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1074 val &= ~EDP_PSR_ENABLE;
1075 intel_de_write(dev_priv,
1076 EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1078 intel_dp->psr.active = false;
1081 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1083 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1084 i915_reg_t psr_status;
1085 u32 psr_status_mask;
1087 lockdep_assert_held(&intel_dp->psr.lock);
1089 if (!intel_dp->psr.enabled)
1092 drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1093 intel_dp->psr.psr2_enabled ? "2" : "1");
1095 intel_psr_exit(intel_dp);
1097 if (intel_dp->psr.psr2_enabled) {
1098 psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1099 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1101 psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1102 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1105 /* Wait till PSR is idle */
1106 if (intel_de_wait_for_clear(dev_priv, psr_status,
1107 psr_status_mask, 2000))
1108 drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1111 if (intel_dp->psr.psr2_sel_fetch_enabled &&
1112 (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
1113 IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
1114 intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
1115 DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
1117 /* Disable PSR on Sink */
1118 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1120 if (intel_dp->psr.psr2_enabled)
1121 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1123 intel_dp->psr.enabled = false;
1127 * intel_psr_disable - Disable PSR
1128 * @intel_dp: Intel DP
1129 * @old_crtc_state: old CRTC state
1131 * This function needs to be called before disabling the pipe.
1133 void intel_psr_disable(struct intel_dp *intel_dp,
1134 const struct intel_crtc_state *old_crtc_state)
1136 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1138 if (!old_crtc_state->has_psr)
1141 if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1144 mutex_lock(&intel_dp->psr.lock);
1146 intel_psr_disable_locked(intel_dp);
1148 mutex_unlock(&intel_dp->psr.lock);
1149 cancel_work_sync(&intel_dp->psr.work);
1150 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1153 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1155 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1157 if (IS_TIGERLAKE(dev_priv))
1159 * Writes to CURSURFLIVE in TGL are causing IOMMU errors and
1160 * visual glitches that are often reproduced when executing
1161 * CPU intensive workloads while an eDP 4k panel is attached.
1163 * Manually exiting PSR causes the frontbuffer to be updated
1164 * without glitches and the IOMMU errors are also gone, but
1165 * this comes at the cost of less time with PSR active.
1167 * So use this workaround until this issue is root caused
1168 * and a better fix is found.
1170 intel_psr_exit(intel_dp);
1171 else if (DISPLAY_VER(dev_priv) >= 9)
1173 * Display WA #0884: skl+
1174 * This documented WA for bxt can be safely applied
1175 * broadly so we can force HW tracking to exit PSR
1176 * instead of disabling and re-enabling.
1177 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
1178 * but it makes more sense to write to the currently active
1181 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1184 * A write to CURSURFLIVE does not cause HW tracking to exit PSR
1185 * on older gens, so do the manual exit instead.
1187 intel_psr_exit(intel_dp);
1190 void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
1191 const struct intel_crtc_state *crtc_state,
1192 const struct intel_plane_state *plane_state,
1195 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1196 enum pipe pipe = plane->pipe;
1197 const struct drm_rect *clip;
1201 if (!crtc_state->enable_psr2_sel_fetch)
1204 val = plane_state ? plane_state->ctl : 0;
1205 val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
1206 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
1207 if (!val || plane->id == PLANE_CURSOR)
1210 clip = &plane_state->psr2_sel_fetch_area;
1212 val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
1213 val |= plane_state->uapi.dst.x1;
1214 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
1216 /* TODO: consider auxiliary surfaces */
1217 x = plane_state->uapi.src.x1 >> 16;
1218 y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
1219 ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
1221 drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
1224 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
1227 /* Sizes are 0 based */
1228 val = (drm_rect_height(clip) - 1) << 16;
1229 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
1230 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1233 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1235 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1237 if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
1238 !crtc_state->enable_psr2_sel_fetch)
1241 intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
1242 crtc_state->psr2_man_track_ctl);
1245 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1246 struct drm_rect *clip, bool full_update)
1248 u32 val = PSR2_MAN_TRK_CTL_ENABLE;
1251 val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1258 drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1260 val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1261 val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1262 val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1264 crtc_state->psr2_man_track_ctl = val;
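/*
 * Worked example (illustrative): a pipe clip of y1 = 8, y2 = 36
 * (already aligned to 4 lines by the caller) programs
 * SU_REGION_START_ADDR(8 / 4 + 1) = 3 and
 * SU_REGION_END_ADDR(36 / 4 + 1) = 10, i.e. the SU region is addressed
 * in 4-line blocks.
 */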
1267 static void clip_area_update(struct drm_rect *overlap_damage_area,
1268 struct drm_rect *damage_area)
1270 if (overlap_damage_area->y1 == -1) {
1271 overlap_damage_area->y1 = damage_area->y1;
1272 overlap_damage_area->y2 = damage_area->y2;
1276 if (damage_area->y1 < overlap_damage_area->y1)
1277 overlap_damage_area->y1 = damage_area->y1;
1279 if (damage_area->y2 > overlap_damage_area->y2)
1280 overlap_damage_area->y2 = damage_area->y2;
1283 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
1284 struct intel_crtc *crtc)
1286 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1287 struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1288 struct intel_plane_state *new_plane_state, *old_plane_state;
1289 struct intel_plane *plane;
1290 bool full_update = false;
1293 if (!crtc_state->enable_psr2_sel_fetch)
1296 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
1301 * Calculate minimal selective fetch area of each plane and calculate
1302 * the pipe damaged area.
1303 * In the next loop the plane selective fetch area will actually be set
1304 * using the whole pipe damaged area.
1306 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1307 new_plane_state, i) {
1308 struct drm_rect src, damaged_area = { .y1 = -1 };
1309 struct drm_mode_rect *damaged_clips;
1312 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
1315 if (!new_plane_state->uapi.visible &&
1316 !old_plane_state->uapi.visible)
1320 * TODO: Not clear how to handle planes with negative position;
1321 * also, planes are not updated if they have a negative X
1322 * position, so for now do a full update in these cases
1324 if (new_plane_state->uapi.dst.y1 < 0 ||
1325 new_plane_state->uapi.dst.x1 < 0) {
1330 num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
1333 * If visibility changed or the plane moved, mark the whole plane area as
1334 * damaged, as it needs a complete redraw in both its old and new positions.
1337 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
1338 !drm_rect_equals(&new_plane_state->uapi.dst,
1339 &old_plane_state->uapi.dst)) {
1340 if (old_plane_state->uapi.visible) {
1341 damaged_area.y1 = old_plane_state->uapi.dst.y1;
1342 damaged_area.y2 = old_plane_state->uapi.dst.y2;
1343 clip_area_update(&pipe_clip, &damaged_area);
1346 if (new_plane_state->uapi.visible) {
1347 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1348 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1349 clip_area_update(&pipe_clip, &damaged_area);
1352 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
1354 new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
1356 * If the plane doesn't have damaged areas but the
1357 * framebuffer or the alpha changed, mark the whole
1358 * plane area as damaged.
1360 damaged_area.y1 = new_plane_state->uapi.dst.y1;
1361 damaged_area.y2 = new_plane_state->uapi.dst.y2;
1362 clip_area_update(&pipe_clip, &damaged_area);
1366 drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
1367 damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
1369 for (j = 0; j < num_clips; j++) {
1370 struct drm_rect clip;
1372 clip.x1 = damaged_clips[j].x1;
1373 clip.y1 = damaged_clips[j].y1;
1374 clip.x2 = damaged_clips[j].x2;
1375 clip.y2 = damaged_clips[j].y2;
1376 if (drm_rect_intersect(&clip, &src))
1377 clip_area_update(&damaged_area, &clip);
1380 if (damaged_area.y1 == -1)
1383 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
1384 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
1385 clip_area_update(&pipe_clip, &damaged_area);
1389 goto skip_sel_fetch_set_loop;
1391 /* It must be aligned to 4 lines */
1392 pipe_clip.y1 -= pipe_clip.y1 % 4;
1393 if (pipe_clip.y2 % 4)
1394 pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
1397 * Now that we have the pipe damaged area, check if it intersects with
1398 * each plane; if it does, set the plane's selective fetch area.
1400 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
1401 new_plane_state, i) {
1402 struct drm_rect *sel_fetch_area, inter;
1404 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
1405 !new_plane_state->uapi.visible)
1409 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
1412 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
1413 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
1414 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1417 skip_sel_fetch_set_loop:
1418 psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
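/*
 * Worked example of the two passes above (illustrative): damage of
 * y1 = 450, y2 = 470 on one plane is aligned to y1 = 448, y2 = 472 for
 * the pipe clip. A plane with uapi.dst spanning y 400..900 intersects
 * it, so its psr2_sel_fetch_area becomes y1 = 448 - 400 = 48,
 * y2 = 472 - 400 = 72 in plane-relative coordinates.
 */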
1423 * intel_psr_update - Update PSR state
1424 * @intel_dp: Intel DP
1425 * @crtc_state: new CRTC state
1426 * @conn_state: new CONNECTOR state
1428 * This function will update the PSR state, disabling, enabling or switching PSR
1429 * versions when executing fastsets. For a full modeset, intel_psr_disable() and
1430 * intel_psr_enable() should be called instead.
1432 void intel_psr_update(struct intel_dp *intel_dp,
1433 const struct intel_crtc_state *crtc_state,
1434 const struct drm_connector_state *conn_state)
1436 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1437 struct intel_psr *psr = &intel_dp->psr;
1438 bool enable, psr2_enable;
1440 if (!CAN_PSR(intel_dp))
1443 mutex_lock(&intel_dp->psr.lock);
1445 enable = crtc_state->has_psr;
1446 psr2_enable = crtc_state->has_psr2;
1448 if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
1449 crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
1450 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
1451 if (crtc_state->crc_enabled && psr->enabled)
1452 psr_force_hw_tracking_exit(intel_dp);
1453 else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
1455 * Activate PSR again after a force exit when enabling
1458 if (!intel_dp->psr.active &&
1459 !intel_dp->psr.busy_frontbuffer_bits)
1460 schedule_work(&intel_dp->psr.work);
1467 intel_psr_disable_locked(intel_dp);
1470 intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
1473 mutex_unlock(&intel_dp->psr.lock);
1477 * psr_wait_for_idle - wait for PSR1 to idle
1478 * @intel_dp: Intel DP
1479 * @out_value: PSR status in case of failure
1481 * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
1484 static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
1486 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1489 * From bspec: Panel Self Refresh (BDW+)
1490 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
1491 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
1492 * defensive enough to cover everything.
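 * For example, at 60 Hz this worst case is ~16.7 ms + 6 ms + 1.5 ms,
 * roughly 24 ms, so the 50 ms timeout leaves about a 2x margin.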
1494 return __intel_wait_for_register(&dev_priv->uncore,
1495 EDP_PSR_STATUS(intel_dp->psr.transcoder),
1496 EDP_PSR_STATUS_STATE_MASK,
1497 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
1502 * intel_psr_wait_for_idle - wait for PSR1 to idle
1503 * @new_crtc_state: new CRTC state
1505 * This function is expected to be called from pipe_update_start() where it is
1506 * not expected to race with PSR enable or disable.
1508 void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
1510 struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
1511 struct intel_encoder *encoder;
1513 if (!new_crtc_state->has_psr)
1516 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1517 new_crtc_state->uapi.encoder_mask) {
1518 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1521 mutex_lock(&intel_dp->psr.lock);
1522 if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
1523 mutex_unlock(&intel_dp->psr.lock);
1527 /* PSR1 is enabled at this point */
1528 if (psr_wait_for_idle(intel_dp, &psr_status))
1529 drm_err(&dev_priv->drm,
1530 "PSR idle timed out 0x%x, atomic update may fail\n",
1532 mutex_unlock(&intel_dp->psr.lock);
1536 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
1538 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1543 if (!intel_dp->psr.enabled)
1546 if (intel_dp->psr.psr2_enabled) {
1547 reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1548 mask = EDP_PSR2_STATUS_STATE_MASK;
1550 reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1551 mask = EDP_PSR_STATUS_STATE_MASK;
1554 mutex_unlock(&intel_dp->psr.lock);
1556 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
1558 drm_err(&dev_priv->drm,
1559 "Timed out waiting for PSR Idle for re-enable\n");
1561 /* After the unlocked wait, verify that PSR is still wanted! */
1562 mutex_lock(&intel_dp->psr.lock);
1563 return err == 0 && intel_dp->psr.enabled;
1566 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
1568 struct drm_connector_list_iter conn_iter;
1569 struct drm_device *dev = &dev_priv->drm;
1570 struct drm_modeset_acquire_ctx ctx;
1571 struct drm_atomic_state *state;
1572 struct drm_connector *conn;
1575 state = drm_atomic_state_alloc(dev);
1579 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1580 state->acquire_ctx = &ctx;
1584 drm_connector_list_iter_begin(dev, &conn_iter);
1585 drm_for_each_connector_iter(conn, &conn_iter) {
1586 struct drm_connector_state *conn_state;
1587 struct drm_crtc_state *crtc_state;
1589 if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
1592 conn_state = drm_atomic_get_connector_state(state, conn);
1593 if (IS_ERR(conn_state)) {
1594 err = PTR_ERR(conn_state);
1598 if (!conn_state->crtc)
1601 crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
1602 if (IS_ERR(crtc_state)) {
1603 err = PTR_ERR(crtc_state);
1607 /* Mark mode as changed to trigger a pipe->update() */
1608 crtc_state->mode_changed = true;
1610 drm_connector_list_iter_end(&conn_iter);
1613 err = drm_atomic_commit(state);
1615 if (err == -EDEADLK) {
1616 drm_atomic_state_clear(state);
1617 err = drm_modeset_backoff(&ctx);
1622 drm_modeset_drop_locks(&ctx);
1623 drm_modeset_acquire_fini(&ctx);
1624 drm_atomic_state_put(state);
1629 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
1631 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1632 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
1636 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
1637 mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1638 drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
1642 ret = mutex_lock_interruptible(&intel_dp->psr.lock);
1646 old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
1647 intel_dp->psr.debug = val;
1650 * Do it right away if it's already enabled, otherwise it will be done
1651 * when enabling the source.
1653 if (intel_dp->psr.enabled)
1654 psr_irq_control(intel_dp);
1656 mutex_unlock(&intel_dp->psr.lock);
1658 if (old_mode != mode)
1659 ret = intel_psr_fastset_force(dev_priv);
1664 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
1666 struct intel_psr *psr = &intel_dp->psr;
1668 intel_psr_disable_locked(intel_dp);
1669 psr->sink_not_reliable = true;
1670 /* let's make sure that the sink is awake */
1671 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
1674 static void intel_psr_work(struct work_struct *work)
1676 struct intel_dp *intel_dp =
1677 container_of(work, typeof(*intel_dp), psr.work);
1679 mutex_lock(&intel_dp->psr.lock);
1681 if (!intel_dp->psr.enabled)
1684 if (READ_ONCE(intel_dp->psr.irq_aux_error))
1685 intel_psr_handle_irq(intel_dp);
1688 * We have to make sure PSR is ready for re-enable,
1689 * otherwise it stays disabled until the next full enable/disable cycle.
1690 * PSR might take some time to get fully disabled
1691 * and be ready for re-enable.
1693 if (!__psr_wait_for_idle_locked(intel_dp))
1697 * The delayed work can race with an invalidate hence we need to
1698 * recheck. Since psr_flush first clears this and then reschedules we
1699 * won't ever miss a flush when bailing out here.
1701 if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
1704 intel_psr_activate(intel_dp);
1706 mutex_unlock(&intel_dp->psr.lock);
1710 * intel_psr_invalidate - Invalidate PSR
1711 * @dev_priv: i915 device
1712 * @frontbuffer_bits: frontbuffer plane tracking bits
1713 * @origin: which operation caused the invalidate
1715 * Since the hardware frontbuffer tracking has gaps we need to integrate
1716 * with the software frontbuffer tracking. This function gets called every
1717 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
1718 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
1720 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1722 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
1723 unsigned frontbuffer_bits, enum fb_op_origin origin)
1725 struct intel_encoder *encoder;
1727 if (origin == ORIGIN_FLIP)
1730 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1731 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
1732 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1734 mutex_lock(&intel_dp->psr.lock);
1735 if (!intel_dp->psr.enabled) {
1736 mutex_unlock(&intel_dp->psr.lock);
1740 pipe_frontbuffer_bits &=
1741 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
1742 intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
1744 if (pipe_frontbuffer_bits)
1745 intel_psr_exit(intel_dp);
1747 mutex_unlock(&intel_dp->psr.lock);
1751 * When we completely rely on PSR2 S/W tracking in the future,
1752 * intel_psr_flush() will also invalidate and flush the PSR for the
1753 * ORIGIN_FLIP event; therefore tgl_dc3co_flush() will need to be changed
1754 * accordingly.
1757 tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
1758 enum fb_op_origin origin)
1760 mutex_lock(&intel_dp->psr.lock);
1762 if (!intel_dp->psr.dc3co_enabled)
1765 if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
1769 * At every frontbuffer flush flip event, modify the delay of the delayed work;
1770 * when the delayed work finally runs it means the display has been idle.
1772 if (!(frontbuffer_bits &
1773 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
1776 tgl_psr2_enable_dc3co(intel_dp);
1777 mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
1778 intel_dp->psr.dc3co_exit_delay);
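	/*
	 * Illustrative timeline: a flip at t = 0 enables DC3CO and arms the
	 * work for t = 6 frames; another flip at t = 3 frames pushes the
	 * deadline out to t = 9 frames via mod_delayed_work(). Only when no
	 * flip arrives for 6 consecutive frames does tgl_dc3co_disable_work()
	 * run, disable DC3CO and restore the deep-sleep idle-frame count.
	 */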
1781 mutex_unlock(&intel_dp->psr.lock);
1785 * intel_psr_flush - Flush PSR
1786 * @dev_priv: i915 device
1787 * @frontbuffer_bits: frontbuffer plane tracking bits
1788 * @origin: which operation caused the flush
1790 * Since the hardware frontbuffer tracking has gaps we need to integrate
1791 * with the software frontbuffer tracking. This function gets called every
1792 * time frontbuffer rendering has completed and flushed out to memory. PSR
1793 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
1795 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1797 void intel_psr_flush(struct drm_i915_private *dev_priv,
1798 unsigned frontbuffer_bits, enum fb_op_origin origin)
1800 struct intel_encoder *encoder;
1802 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
1803 unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
1804 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1806 if (origin == ORIGIN_FLIP) {
1807 tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
1811 mutex_lock(&intel_dp->psr.lock);
1812 if (!intel_dp->psr.enabled) {
1813 mutex_unlock(&intel_dp->psr.lock);
1817 pipe_frontbuffer_bits &=
1818 INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
1819 intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
1821 /* By definition flush = invalidate + flush */
1822 if (pipe_frontbuffer_bits)
1823 psr_force_hw_tracking_exit(intel_dp);
1825 if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
1826 schedule_work(&intel_dp->psr.work);
1827 mutex_unlock(&intel_dp->psr.lock);
1832 * intel_psr_init - Init basic PSR work and mutex.
1833 * @intel_dp: Intel DP
1835 * This function is called after initializing the connector
1836 * (connector initialization handles the connector capabilities),
1837 * and it initializes the basic PSR stuff for each DP encoder.
1839 void intel_psr_init(struct intel_dp *intel_dp)
1841 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1842 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1844 if (!HAS_PSR(dev_priv))
1848 * HSW spec explicitly says PSR is tied to port A.
1849 * BDW+ platforms have an instance of PSR registers per transcoder, but
1850 * BDW, GEN9 and GEN11 were not validated by the HW team on other transcoders.
1852 * For now only one instance of PSR is supported for BDW, GEN9 and GEN11,
1853 * so let's keep it hardcoded to PORT_A for them.
1854 * But GEN12 supports an instance of PSR registers per transcoder.
1856 if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
1857 drm_dbg_kms(&dev_priv->drm,
1858 "PSR condition failed: Port not supported\n");
1862 intel_dp->psr.source_support = true;
1864 if (IS_HASWELL(dev_priv))
1866 * HSW doesn't have PSR registers in the same space as the transcoders,
1867 * so set this to a value that, when subtracted from a register
1868 * in transcoder space, results in the right offset for HSW
1870 dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
1872 if (dev_priv->params.enable_psr == -1)
1873 if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
1874 dev_priv->params.enable_psr = 0;
1876 /* Set link_standby vs. link_off defaults */
1877 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1878 /* HSW and BDW require workarounds that we don't implement. */
1879 intel_dp->psr.link_standby = false;
1880 else if (DISPLAY_VER(dev_priv) < 12)
1881 /* For newer platforms up to TGL, let's respect the VBT again */
1882 intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
1884 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
1885 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
1886 mutex_init(&intel_dp->psr.lock);
1889 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
1890 u8 *status, u8 *error_status)
1892 struct drm_dp_aux *aux = &intel_dp->aux;
1895 ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
1899 ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
1903 *status = *status & DP_PSR_SINK_STATE_MASK;
1908 static void psr_alpm_check(struct intel_dp *intel_dp)
1910 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1911 struct drm_dp_aux *aux = &intel_dp->aux;
1912 struct intel_psr *psr = &intel_dp->psr;
1916 if (!psr->psr2_enabled)
1919 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
1921 drm_err(&dev_priv->drm, "Error reading ALPM status\n");
1925 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
1926 intel_psr_disable_locked(intel_dp);
1927 psr->sink_not_reliable = true;
1928 drm_dbg_kms(&dev_priv->drm,
1929 "ALPM lock timeout error, disabling PSR\n");
1931 /* Clearing error */
1932 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
1936 static void psr_capability_changed_check(struct intel_dp *intel_dp)
1938 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1939 struct intel_psr *psr = &intel_dp->psr;
1943 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
1945 drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
1949 if (val & DP_PSR_CAPS_CHANGE) {
1950 intel_psr_disable_locked(intel_dp);
1951 psr->sink_not_reliable = true;
1952 drm_dbg_kms(&dev_priv->drm,
1953 "Sink PSR capability changed, disabling PSR\n");
1956 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
1960 void intel_psr_short_pulse(struct intel_dp *intel_dp)
1962 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1963 struct intel_psr *psr = &intel_dp->psr;
1964 u8 status, error_status;
1965 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
1966 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
1967 DP_PSR_LINK_CRC_ERROR;
1969 if (!CAN_PSR(intel_dp))
1972 mutex_lock(&psr->lock);
1977 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
1978 drm_err(&dev_priv->drm,
1979 "Error reading PSR status or error status\n");
1983 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
1984 intel_psr_disable_locked(intel_dp);
1985 psr->sink_not_reliable = true;
1988 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
1989 drm_dbg_kms(&dev_priv->drm,
1990 "PSR sink internal error, disabling PSR\n");
1991 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
1992 drm_dbg_kms(&dev_priv->drm,
1993 "PSR RFB storage error, disabling PSR\n");
1994 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
1995 drm_dbg_kms(&dev_priv->drm,
1996 "PSR VSC SDP uncorrectable error, disabling PSR\n");
1997 if (error_status & DP_PSR_LINK_CRC_ERROR)
1998 drm_dbg_kms(&dev_priv->drm,
1999 "PSR Link CRC error, disabling PSR\n");
2001 if (error_status & ~errors)
2002 drm_err(&dev_priv->drm,
2003 "PSR_ERROR_STATUS unhandled errors %x\n",
2004 error_status & ~errors);
2005 /* clear status register */
2006 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2008 psr_alpm_check(intel_dp);
2009 psr_capability_changed_check(intel_dp);
2012 mutex_unlock(&psr->lock);
2015 bool intel_psr_enabled(struct intel_dp *intel_dp)
2019 if (!CAN_PSR(intel_dp))
2022 mutex_lock(&intel_dp->psr.lock);
2023 ret = intel_dp->psr.enabled;
2024 mutex_unlock(&intel_dp->psr.lock);