/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
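 *
 * As a rough sketch (simplified; the callers live in intel_frontbuffer.c),
 * a frontbuffer write reaches PSR like this:
 *
 *   intel_frontbuffer_invalidate()
 *     -> intel_psr_invalidate()    // PSR exits before rendering starts
 *   ...rendering...
 *   intel_frontbuffer_flush()
 *     -> intel_psr_flush()         // re-activation is scheduled via psr.work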
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
 * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and the function above is executed,
 * DC3CO is disabled and PSR2 is configured to enter deep sleep, resetting
 * again in case of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most of the modern systems will only
 * use page flips.
 */

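/*
 * Resolve the effective "PSR enabled" knob: a debug mode set through debugfs
 * overrides the enable_psr module parameter.
 */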
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915->params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return true;
	}
}

static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder trans_shift;
	u32 mask, val;
	i915_reg_t imr_reg;

	/*
	 * gen12+ has registers relative to the transcoder, one per transcoder,
	 * all using the same bit definitions: handle it as TRANSCODER_EDP to
	 * force a 0 shift in the bit definitions.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
	} else {
		trans_shift = intel_dp->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = intel_de_read(dev_priv, imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	intel_de_write(dev_priv, imr_reg, val);
}

static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	ktime_t time_ns = ktime_get();
	enum transcoder trans_shift;
	i915_reg_t imr_reg;

	if (DISPLAY_VER(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
	} else {
		trans_shift = intel_dp->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(dev_priv) >= 9) {
			u32 val = intel_de_read(dev_priv,
						PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = intel_dp->psr.psr2_enabled;

			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
				       val);
			psr_event_print(dev_priv, val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or clearing
		 * irq_aux_error.
		 */
		val = intel_de_read(dev_priv, imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		intel_de_write(dev_priv, imr_reg, val);

		schedule_work(&intel_dp->psr.work);
	}
}

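/* ALPM is mandatory for PSR2: check the sink's ALPM capability in DPCD. */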
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ssize_t r;
	u16 w;
	u8 y;

	/* If the sink doesn't have specific granularity requirements, use the legacy defaults */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
		/* As the PSR2 HW sends full lines, we do not care about x granularity */
		w = 4;
		y = 4;
		goto exit;
	}

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || w == 0)
		w = 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
	if (r != 1)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 1 || y == 0)
		y = 4;

exit:
	intel_dp->psr.su_w_granularity = w;
	intel_dp->psr.su_y_granularity = y;
}

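/**
 * intel_psr_init_dpcd - Read and cache the sink's PSR DPCD capabilities
 * @intel_dp: Intel DP
 *
 * Sets psr.sink_support/psr.sink_psr2_support and caches the sink sync
 * latency and selective update granularity for later mode validation.
 */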
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel can do selective updates without
		 * an AUX frame sync.
		 *
		 * To support panels with PSR version 02h, or 03h without the
		 * Y-coordinate requirement, we would need to enable GTC first.
		 */
		intel_dp->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
			    intel_dp->psr.sink_psr2_support ? "" : "not ");

		if (intel_dp->psr.sink_psr2_support) {
			intel_dp->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			intel_dp_get_su_granularity(intel_dp);
		}
	}
}

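/*
 * On HSW/BDW the PSR hardware sends a canned AUX transaction when exiting
 * self-refresh. Program the AUX data registers with a native AUX write that
 * sets DP_SET_POWER to D0, mirroring what a manual AUX transfer would send.
 */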
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
		       aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (intel_dp->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (intel_dp->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (DISPLAY_VER(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (DISPLAY_VER(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		return val;
	}

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = EDP_PSR2_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	if (!IS_ALDERLAKE_P(dev_priv))
		val |= EDP_SU_TRACK_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
	val |= intel_psr2_get_tp_time(intel_dp);

	/* Wa_22012278275:adl-p */
	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
		static const u8 map[] = {
			2, /* 5 lines */
			1, /* 6 lines */
			0, /* 7 lines */
			3, /* 8 lines */
			6, /* 9 lines */
			5, /* 10 lines */
			4, /* 11 lines */
			7, /* 12 lines */
		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information.
		 */
		u32 tmp, lines = 7;

		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;

		tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
		val |= tmp;

		tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
		val |= tmp;
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the
		 * default values from BSpec. For optimal power consumption,
		 * modes below 4k resolution need to decrease IO_BUFFER_WAKE
		 * and FAST_WAKE, and modes above 4k need to increase them.
		 */
		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= TGL_EDP_PSR2_FAST_WAKE(7);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= EDP_PSR2_FAST_WAKE(7);
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		val |= EDP_PSR2_SU_SDP_SCANLINE;

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		/* WA 1408330847 */
		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);

		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
			       PSR2_MAN_TRK_CTL_ENABLE);
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
	}

	/*
	 * The PSR2 HW incorrectly uses EDP_PSR_TP1_TP3_SEL, and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);

	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (DISPLAY_VER(dev_priv) < 9)
		return false;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

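/* Frame time in microseconds for the current refresh rate, or 0 if the CRTC is inactive. */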
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     u32 idle_frames)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
	val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}

static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}

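/*
 * Delayed work that runs once the display has been idle for the DC3CO exit
 * delay: take PSR2 back to deep sleep and make DC6 reachable again.
 */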
static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

	mutex_lock(&intel_dp->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_exitline)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(intel_dp);
}

static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum port port = dig_port->base.port;

	if (IS_ALDERLAKE_P(dev_priv))
		return pipe <= PIPE_B && port <= PORT_B;

	return pipe == PIPE_A && port == PORT_A;
}

static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 exit_scanlines;

	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

	if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
		return;

	/* Wa_16011303918:adl-p */
	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (!dev_priv->params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 sel fetch not enabled, plane rotated\n");
			return false;
		}
	}

	/* Wa_14010254185 Wa_14010103792 */
	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
		return false;
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}

static bool psr2_granularity_check(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;

	/* The PSR2 HW only sends full lines, so we only need to validate the width */
	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
		return false;

	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return intel_dp->psr.su_y_granularity == 4;

	/*
	 * adl_p has 1 line granularity. For other platforms with SW tracking
	 * we can adjust the y coordinates to match the sink requirement if it
	 * is a multiple of 4.
	 */
	if (IS_ALDERLAKE_P(dev_priv))
		y_granularity = intel_dp->psr.su_y_granularity;
	else if (intel_dp->psr.su_y_granularity <= 2)
		y_granularity = 4;
	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
		y_granularity = intel_dp->psr.su_y_granularity;

	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}

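/*
 * Check whether the PSR2 SU SDP can still be sent during hblank. As a worked
 * example with hypothetical numbers: 4 lanes at port_clock 270000 (a 270 MHz
 * symbol clock) give req_ns = (72 / 4) * 1000 / 270 = 66 ns, while a
 * 160-pixel hblank at a 148500 kHz pixel clock gives hblank_ns =
 * 160 * 1000000 / 148500 = 1077 ns, which is well over the required 100 ns
 * of margin, so no early scanline indication would be needed.
 */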
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
							struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 hblank_total, hblank_ns, req_ns;

	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

	/* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */
	req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000);

	if ((hblank_ns - req_ns) > 100)
		return true;

	if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
		return false;

	crtc_state->req_psr2_sdp_prior_scanline = true;
	return true;
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!intel_dp->psr.sink_psr2_support)
		return false;

	/* JSL and EHL only support eDP 1.3 */
	if (IS_JSL_EHL(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

	/* Wa_16011181250 */
	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG2(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
		return false;
	}

	/*
	 * We are missing the implementation of some workarounds needed to
	 * enable PSR2 on Alderlake-P; until they are ready, PSR2 should be
	 * kept disabled.
	 */
	if (IS_ALDERLAKE_P(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n");
		return false;
	}

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	if (!psr2_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (DISPLAY_VER(dev_priv) >= 10) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (DISPLAY_VER(dev_priv) == 9) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported on this display stepping\n");
		return false;
	}

	if (!psr2_granularity_check(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
		return false;
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		return false;
	}

	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
		return false;
	}

	/* Wa_16011303918:adl-p */
	if (crtc_state->vrr.enable &&
	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
		return false;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;
}

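/**
 * intel_psr_compute_config - Compute PSR configuration
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * Validates the global, sink and mode constraints and sets
 * crtc_state->has_psr and crtc_state->has_psr2 accordingly.
 */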
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled, do not enable PSR.
	 */
	if (crtc_state->vrr.enable)
		return;

	if (!CAN_PSR(intel_dp))
		return;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup time (%d us) too long\n",
			    psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}

void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	/*
	 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
	 * enabled/disabled because of frontbuffer tracking and others.
	 */
	pipe_config->has_psr = true;
	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.psr2_enabled)
		goto unlock;

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
		val &= EXITLINE_MASK;
		pipe_config->dc3co_exitline = val;
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder transcoder = intel_dp->psr.transcoder;

	if (transcoder_has_psr2(dev_priv, transcoder))
		drm_WARN_ON(&dev_priv->drm,
			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (intel_dp->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
}

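/*
 * Program the source (HW) side of PSR: AUX message registers, chicken bits,
 * debug masks, interrupts and, when applicable, the DC3CO exitline.
 */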
static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be set up;
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = intel_de_read(dev_priv, reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		intel_de_write(dev_priv, reg, chicken);
	}

	/*
	 * Per Spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (DISPLAY_VER(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
		       mask);

	psr_irq_control(intel_dp);

	if (intel_dp->psr.dc3co_exitline) {
		u32 val;

		/*
		 * TODO: if future platforms support DC3CO in more than one
		 * transcoder, EXITLINE will need to be unset when disabling PSR
		 */
		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
		val &= ~EXITLINE_MASK;
		val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
		val |= EXITLINE_ENABLE;
		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
	}

	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);

	/* Wa_16011168373:adl-p */
	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) &&
	    intel_dp->psr.psr2_enabled)
		intel_de_rmw(dev_priv,
			     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
			     TRANS_SET_CONTEXT_LATENCY_MASK,
			     TRANS_SET_CONTEXT_LATENCY_VALUE(1));
}

static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * Enabling PSR in this situation causes the screen to freeze the
	 * first time the PSR HW tries to activate, so keep PSR disabled to
	 * avoid any rendering problems.
	 */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv,
				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = intel_de_read(dev_priv, EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
	}
	if (val) {
		intel_dp->psr.sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interruption error set, not enabling PSR\n");
		return false;
	}

	return true;
}

static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state,
				    const struct drm_connector_state *conn_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	u32 val;

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);

	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
	intel_dp->psr.req_psr2_sdp_prior_scanline =
		crtc_state->req_psr2_sdp_prior_scanline;

	if (!psr_interrupt_error_check(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
				     &intel_dp->psr.vsc);
	intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp);
	intel_dp->psr.enabled = true;
	intel_dp->psr.paused = false;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 * @conn_state: new CONNECTOR state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!CAN_PSR(intel_dp))
		return;

	if (!crtc_state->has_psr)
		return;

	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);

	mutex_lock(&intel_dp->psr.lock);
	intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
	mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
			val = intel_de_read(dev_priv,
					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
		val &= ~EDP_PSR2_ENABLE;
		intel_de_write(dev_priv,
			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
		val &= ~EDP_PSR_ENABLE;
		intel_de_write(dev_priv,
			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
	}
	intel_dp->psr.active = false;
}

static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	if (intel_dp->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);

	/* WA 1408330847 */
	if (intel_dp->psr.psr2_sel_fetch_enabled &&
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);

	/* Wa_16011168373:adl-p */
	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) &&
	    intel_dp->psr.psr2_enabled)
		intel_de_rmw(dev_priv,
			     TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
			     TRANS_SET_CONTEXT_LATENCY_MASK, 0);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (intel_dp->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	intel_dp->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}

/**
 * intel_psr_pause - Pause PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after enabling PSR.
 */
void intel_psr_pause(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled) {
		mutex_unlock(&psr->lock);
		return;
	}

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);
	psr->paused = true;

	mutex_unlock(&psr->lock);

	cancel_work_sync(&psr->work);
	cancel_delayed_work_sync(&psr->dc3co_work);
}

/**
 * intel_psr_resume - Resume PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after pausing PSR.
 */
void intel_psr_resume(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->paused)
		goto unlock;

	psr->paused = false;
	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&psr->lock);
}

static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently
		 * active pipe.
		 */
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(intel_dp);
}

void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *plane_state,
					int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	const struct drm_rect *clip;
	u32 val, offset;
	int ret, x, y;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	val = plane_state ? plane_state->ctl : 0;
	val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
	if (!val || plane->id == PLANE_CURSOR)
		return;

	clip = &plane_state->psr2_sel_fetch_area;

	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
	val |= plane_state->uapi.dst.x1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);

	/* TODO: consider auxiliary surfaces */
	x = plane_state->uapi.src.x1 >> 16;
	y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
	ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
	if (ret)
		drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
			      ret);
	val = y << 16 | x;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
			  val);

	/* Sizes are 0 based */
	val = (drm_rect_height(clip) - 1) << 16;
	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}

void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
	    !crtc_state->enable_psr2_sel_fetch)
		return;

	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
		       crtc_state->psr2_man_track_ctl);
}

static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  struct drm_rect *clip, bool full_update)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = PSR2_MAN_TRK_CTL_ENABLE;

	if (full_update) {
		if (IS_ALDERLAKE_P(dev_priv))
			val |= ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
		else
			val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;

		goto exit;
	}

	if (clip->y1 == -1)
		goto exit;

	if (IS_ALDERLAKE_P(dev_priv)) {
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2);
	} else {
		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);

		val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
	}
exit:
	crtc_state->psr2_man_track_ctl = val;
}

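/*
 * Grow the accumulated damage area to also cover damage_area; y1 == -1
 * denotes an empty area.
 */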
static void clip_area_update(struct drm_rect *overlap_damage_area,
			     struct drm_rect *damage_area)
{
	if (overlap_damage_area->y1 == -1) {
		overlap_damage_area->y1 = damage_area->y1;
		overlap_damage_area->y2 = damage_area->y2;
		return;
	}

	if (damage_area->y1 < overlap_damage_area->y1)
		overlap_damage_area->y1 = damage_area->y1;

	if (damage_area->y2 > overlap_damage_area->y2)
		overlap_damage_area->y2 = damage_area->y2;
}

static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
						struct drm_rect *pipe_clip)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const u16 y_alignment = crtc_state->su_y_granularity;

	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
	if (pipe_clip->y2 % y_alignment)
		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;

	if (IS_ALDERLAKE_P(dev_priv) && crtc_state->dsc.compression_enable)
		drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
}

int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
	struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	bool full_update = false;
	int i, ret;

	if (!crtc_state->enable_psr2_sel_fetch)
		return 0;

	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
	if (ret)
		return ret;

	/*
	 * Calculate the minimal selective fetch area of each plane and the
	 * pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be set
	 * using the whole pipe damaged area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect src, damaged_area = { .y1 = -1 };
		struct drm_mode_rect *damaged_clips;
		u32 num_clips, j;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
			continue;

		if (!new_plane_state->uapi.visible &&
		    !old_plane_state->uapi.visible)
			continue;

		/*
		 * TODO: Not clear how to handle planes with negative position;
		 * also planes are not updated if they have a negative X
		 * position, so for now do a full update in these cases.
		 */
		if (new_plane_state->uapi.dst.y1 < 0 ||
		    new_plane_state->uapi.dst.x1 < 0) {
			full_update = true;
			break;
		}

		num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);

		/*
		 * If visibility changed or the plane moved, mark the whole
		 * plane area as damaged as it needs a complete redraw in both
		 * the new and old positions.
		 */
		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
		    !drm_rect_equals(&new_plane_state->uapi.dst,
				     &old_plane_state->uapi.dst)) {
			if (old_plane_state->uapi.visible) {
				damaged_area.y1 = old_plane_state->uapi.dst.y1;
				damaged_area.y2 = old_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area);
			}

			if (new_plane_state->uapi.visible) {
				damaged_area.y1 = new_plane_state->uapi.dst.y1;
				damaged_area.y2 = new_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area);
			}
			continue;
		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
			   (!num_clips &&
			    new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
			/*
			 * If the plane doesn't have damaged areas but the
			 * framebuffer or alpha changed, mark the whole plane
			 * area as damaged.
			 */
			damaged_area.y1 = new_plane_state->uapi.dst.y1;
			damaged_area.y2 = new_plane_state->uapi.dst.y2;
			clip_area_update(&pipe_clip, &damaged_area);
			continue;
		}

		drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
		damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);

		for (j = 0; j < num_clips; j++) {
			struct drm_rect clip;

			clip.x1 = damaged_clips[j].x1;
			clip.y1 = damaged_clips[j].y1;
			clip.x2 = damaged_clips[j].x2;
			clip.y2 = damaged_clips[j].y2;
			if (drm_rect_intersect(&clip, &src))
				clip_area_update(&damaged_area, &clip);
		}

		if (damaged_area.y1 == -1)
			continue;

		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
		clip_area_update(&pipe_clip, &damaged_area);
	}

	if (full_update)
		goto skip_sel_fetch_set_loop;

	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);

	/*
	 * Now that we have the pipe damaged area, check if it intersects
	 * with each plane; if it does, set the plane selective fetch area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect *sel_fetch_area, inter;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
		    !new_plane_state->uapi.visible)
			continue;

		inter = pipe_clip;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
			continue;

		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
	}

skip_sel_fetch_set_loop:
	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
	return 0;
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 * @conn_state: new CONNECTOR state
 *
 * This function will update the PSR state, disabling, enabling or switching
 * PSR version when executing fastsets. For full modesets, intel_psr_disable()
 * and intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&intel_dp->psr.lock);

	enable = crtc_state->has_psr;
	psr2_enable = crtc_state->has_psr2;

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
	    crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(intel_dp);
		else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC on older gens.
			 */
			if (!intel_dp->psr.active &&
			    !intel_dp->psr.busy_frontbuffer_bits)
				schedule_work(&intel_dp->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(intel_dp, crtc_state, conn_state);

unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * psr_wait_for_idle - wait for PSR1 to idle
 * @intel_dp: Intel DP
 * @out_value: PSR status in case of failure
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */
	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(intel_dp->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 */
void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		u32 psr_status;

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* when the PSR1 is enabled */
		if (psr_wait_for_idle(intel_dp, &psr_status))
			drm_err(&dev_priv->drm,
				"PSR idle timed out 0x%x, atomic update may fail\n",
				psr_status);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	if (intel_dp->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		drm_err(&dev_priv->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled;
}

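/*
 * Force a fastset on every eDP connector so that a new PSR debug mode is
 * actually applied to the hardware.
 */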
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

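/**
 * intel_psr_debug_set - Set the PSR debug mode
 * @intel_dp: Intel DP
 * @val: new debug value, a mask of I915_PSR_DEBUG_* flags
 *
 * When the debug mode changes, a fastset is forced so it takes effect.
 *
 * Returns: 0 on success, or a negative error code.
 */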
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			intel_psr_exit(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}

/*
 * When we will completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
 * events as well, so tgl_dc3co_flush() will need to be changed
 * accordingly.
 */
static void
tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		enum fb_op_origin origin)
{
	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.dc3co_exitline)
		goto unlock;

	if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
		goto unlock;

	/*
	 * Every frontbuffer flush caused by a flip pushes the delayed work
	 * deadline out; when the delayed work finally runs it means the
	 * display has been idle for that long.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);

unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

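/*
 * Illustrative timeline (not driver code): mod_delayed_work() above acts
 * as a debounce timer, with dc3co_exit_delay as the idle threshold:
 *
 *	flip @ t0  -> DC3CO enabled, dc3co_work armed for t0 + delay
 *	flip @ t1  -> deadline pushed out to t1 + delay
 *	no flips   -> dc3co_work (tgl_dc3co_disable_work) finally runs
 *	              and backs out of DC3CO
 */
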
/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (origin == ORIGIN_FLIP) {
			tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
			continue;
		}

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/*
		 * If the PSR is paused by an explicit intel_psr_paused() call,
		 * we have to ensure that the PSR is not activated until
		 * intel_psr_resume() is called.
		 */
		if (intel_dp->psr.paused) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* By definition flush = invalidate + flush */
		if (pipe_frontbuffer_bits)
			psr_force_hw_tracking_exit(intel_dp);

		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
			schedule_work(&intel_dp->psr.work);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

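/*
 * Illustrative sketch, not code from this file: callers in the
 * frontbuffer tracking core are expected to bracket CPU rendering with
 * the invalidate/flush pair above, so PSR is exited while the frontbuffer
 * is dirty and re-armed once the writes have landed:
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU writes hit the frontbuffer ...
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
 */
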
/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after the connector has been initialized
 * (connector initialization handles the connector capabilities) and
 * initializes the basic PSR state for each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder but
	 * BDW, GEN9 and GEN11 are not validated by the HW team in any
	 * transcoder other than the eDP one.
	 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11.
	 * So let's keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
	 * But GEN12 supports an instance of PSR registers per transcoder.
	 */
	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	intel_dp->psr.source_support = true;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have PSR registers in the same space as the
		 * transcoder, so set this to a value that, when subtracted
		 * from the register in transcoder space, results in the
		 * right offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (dev_priv->params.enable_psr == -1)
		if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			dev_priv->params.enable_psr = 0;

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		intel_dp->psr.link_standby = false;
	else if (DISPLAY_VER(dev_priv) < 12)
		/* For new platforms up to TGL let's respect VBT back again */
		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}

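/*
 * Illustrative only: the enable_psr default computed above can be
 * overridden via the i915.enable_psr module parameter (assuming the usual
 * parameter semantics: -1 per-chip default, 0 disable, 1 enable), e.g. on
 * the kernel command line:
 *
 *	i915.enable_psr=1
 */
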
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;
	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;
	return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}
	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);
	return ret;
}