* use page flips.
*/
-static bool psr_global_enabled(struct drm_i915_private *i915)
+static bool psr_global_enabled(struct intel_dp *intel_dp)
{
- switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
return i915->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
}
}
-static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
+static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
- switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
}
}
-static void psr_irq_control(struct drm_i915_private *dev_priv)
+static void psr_irq_control(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder trans_shift;
- u32 mask, val;
i915_reg_t imr_reg;
+ u32 mask, val;
/*
* gen12+ has registers relative to transcoder and one per transcoder
*/
if (INTEL_GEN(dev_priv) >= 12) {
trans_shift = 0;
- imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
} else {
- trans_shift = dev_priv->psr.transcoder;
+ trans_shift = intel_dp->psr.transcoder;
imr_reg = EDP_PSR_IMR;
}
mask = EDP_PSR_ERROR(trans_shift);
- if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+ if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
mask |= EDP_PSR_POST_EXIT(trans_shift) |
EDP_PSR_PRE_ENTRY(trans_shift);
drm_dbg_kms(&dev_priv->drm, "\tPSR disabled\n");
}
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
- enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ ktime_t time_ns = ktime_get();
enum transcoder trans_shift;
i915_reg_t imr_reg;
- ktime_t time_ns = ktime_get();
if (INTEL_GEN(dev_priv) >= 12) {
trans_shift = 0;
- imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
} else {
- trans_shift = dev_priv->psr.transcoder;
+ trans_shift = intel_dp->psr.transcoder;
imr_reg = EDP_PSR_IMR;
}
if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
- dev_priv->psr.last_entry_attempt = time_ns;
+ intel_dp->psr.last_entry_attempt = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
- dev_priv->psr.last_exit = time_ns;
+ intel_dp->psr.last_exit = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 9) {
u32 val = intel_de_read(dev_priv,
PSR_EVENT(cpu_transcoder));
- bool psr2_enabled = dev_priv->psr.psr2_enabled;
+ bool psr2_enabled = intel_dp->psr.psr2_enabled;
intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
val);
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
- dev_priv->psr.irq_aux_error = true;
+ intel_dp->psr.irq_aux_error = true;
/*
* If this interruption is not masked it will keep
val |= EDP_PSR_ERROR(trans_shift);
intel_de_write(dev_priv, imr_reg, val);
- schedule_work(&dev_priv->psr.work);
+ schedule_work(&intel_dp->psr.work);
}
}
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
- if (dev_priv->psr.dp) {
- drm_warn(&dev_priv->drm,
- "More than one eDP panel found, PSR support should be extended\n");
- return;
- }
-
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
return;
}
- dev_priv->psr.sink_support = true;
- dev_priv->psr.sink_sync_latency =
+ intel_dp->psr.sink_support = true;
+ intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- dev_priv->psr.dp = intel_dp;
-
if (INTEL_GEN(dev_priv) >= 9 &&
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
bool y_req = intel_dp->psr_dpcd[1] &
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
- dev_priv->psr.sink_psr2_support = y_req && alpm;
+ intel_dp->psr.sink_psr2_support = y_req && alpm;
drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
- dev_priv->psr.sink_psr2_support ? "" : "not ");
+ intel_dp->psr.sink_psr2_support ? "" : "not ");
- if (dev_priv->psr.sink_psr2_support) {
- dev_priv->psr.colorimetry_support =
+ if (intel_dp->psr.sink_psr2_support) {
+ intel_dp->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.su_x_granularity =
+ intel_dp->psr.su_x_granularity =
intel_dp_get_su_x_granulartiy(intel_dp);
}
}
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
intel_de_write(dev_priv,
- EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+ EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+ intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
aux_ctl);
}
u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_enabled) {
+ if (intel_dp->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE |
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
} else {
- if (dev_priv->psr.link_standby)
+ if (intel_dp->psr.link_standby)
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
if (INTEL_GEN(dev_priv) >= 8)
* off-by-one issue that HW has in some cases.
*/
idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
idle_frames = 0xf;
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- if (dev_priv->psr.link_standby)
+ if (intel_dp->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
val |= intel_psr1_get_tp_time(intel_dp);
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
- intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+ val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
val |= intel_psr2_get_tp_time(intel_dp);
if (INTEL_GEN(dev_priv) >= 12) {
val |= EDP_PSR2_FAST_WAKE(7);
}
- if (dev_priv->psr.psr2_sel_fetch_enabled) {
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
/* WA 1408330847 */
if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
DIS_RAM_BYPASS_PSR2_MAN_TRACK);
intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
PSR2_MAN_TRK_CTL_ENABLE);
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
}
/*
 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
 * recommends keeping this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+ intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}
static bool
drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}
-static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
val &= ~EDP_PSR2_IDLE_FRAME_MASK;
val |= idle_frames;
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}
-static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
- psr2_program_idle_frames(dev_priv, 0);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ psr2_program_idle_frames(intel_dp, 0);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
-static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
- struct intel_dp *intel_dp = dev_priv->psr.dp;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
+ psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
static void tgl_dc3co_disable_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
+ struct intel_dp *intel_dp =
+ container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
/* If delayed work is pending, it is not idle */
- if (delayed_work_pending(&dev_priv->psr.dc3co_work))
+ if (delayed_work_pending(&intel_dp->psr.dc3co_work))
goto unlock;
- tgl_psr2_disable_dc3co(dev_priv);
+ tgl_psr2_disable_dc3co(intel_dp);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
-static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
- if (!dev_priv->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_enabled)
return;
- cancel_delayed_work(&dev_priv->psr.dc3co_work);
+ cancel_delayed_work(&intel_dp->psr.dc3co_work);
 /* Before PSR2 exit, disallow DC3CO */
- tgl_psr2_disable_dc3co(dev_priv);
+ tgl_psr2_disable_dc3co(intel_dp);
}
static void
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
- if (!dev_priv->psr.sink_psr2_support)
+ if (!intel_dp->psr.sink_psr2_support)
return false;
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
return false;
}
- if (!psr2_global_enabled(dev_priv)) {
+ if (!psr2_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
return false;
}
* only need to validate the SU block width is a multiple of
* x granularity.
*/
- if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+ if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
- crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ crtc_hdisplay, intel_dp->psr.su_x_granularity);
return false;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
if (crtc_state->vrr.enable)
return;
- if (!CAN_PSR(dev_priv))
+ if (!CAN_PSR(intel_dp))
return;
- if (intel_dp != dev_priv->psr.dp)
- return;
-
- if (!psr_global_enabled(dev_priv)) {
+ if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
return;
}
- /*
- * HSW spec explicitly says PSR is tied to port A.
- * BDW+ platforms have a instance of PSR registers per transcoder but
- * for now it only supports one instance of PSR, so lets keep it
- * hardcoded to PORT_A
- */
- if (dig_port->base.port != PORT_A) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: Port not supported\n");
- return;
- }
-
- if (dev_priv->psr.sink_not_reliable) {
+ if (intel_dp->psr.sink_not_reliable) {
drm_dbg_kms(&dev_priv->drm,
"PSR sink implementation is not reliable\n");
return;
static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder transcoder = intel_dp->psr.transcoder;
- if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+ if (transcoder_has_psr2(dev_priv, transcoder))
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+ intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
- drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
- lockdep_assert_held(&dev_priv->psr.lock);
+ intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+ lockdep_assert_held(&intel_dp->psr.lock);
/* psr1 and psr2 are mutually exclusive.*/
- if (dev_priv->psr.psr2_enabled)
+ if (intel_dp->psr.psr2_enabled)
hsw_activate_psr2(intel_dp);
else
hsw_activate_psr1(intel_dp);
- dev_priv->psr.active = true;
+ intel_dp->psr.active = true;
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_psr_setup_aux(intel_dp);
- if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+ if (intel_dp->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = intel_de_read(dev_priv, reg);
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+ intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
mask);
- psr_irq_control(dev_priv);
+ psr_irq_control(intel_dp);
if (crtc_state->dc3co_exitline) {
u32 val;
if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
- dev_priv->psr.psr2_sel_fetch_enabled ?
+ intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
}
-static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+static void intel_psr_enable_locked(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_dp *intel_dp = dev_priv->psr.dp;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
u32 val;
- drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
- dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
- dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
- dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+ intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.busy_frontbuffer_bits = 0;
+ intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+ intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
/* DC5/DC6 requires at least 6 idle frames */
val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
- dev_priv->psr.dc3co_exit_delay = val;
- dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+ intel_dp->psr.dc3co_exit_delay = val;
+ intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
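Aside (not part of the patch): at a 60 Hz refresh rate intel_get_frame_time_us() returns roughly 16667 us, so the six-idle-frame requirement above works out to usecs_to_jiffies(16667 * 6), i.e. a psr.dc3co_exit_delay of about 100 ms of idle time before the delayed work disables DC3CO.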
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
*/
if (INTEL_GEN(dev_priv) >= 12) {
val = intel_de_read(dev_priv,
- TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ TRANS_PSR_IIR(intel_dp->psr.transcoder));
val &= EDP_PSR_ERROR(0);
} else {
val = intel_de_read(dev_priv, EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+ val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
}
if (val) {
- dev_priv->psr.sink_not_reliable = true;
+ intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
"PSR interruption error set, not enabling PSR\n");
return;
}
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ intel_dp->psr.psr2_enabled ? "2" : "1");
intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
- &dev_priv->psr.vsc);
- intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
+ &intel_dp->psr.vsc);
+ intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
- dev_priv->psr.enabled = true;
+ intel_dp->psr.enabled = true;
intel_psr_activate(intel_dp);
}
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
+ if (!CAN_PSR(intel_dp))
return;
if (!crtc_state->has_psr)
drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
- mutex_lock(&dev_priv->psr.lock);
- intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
+ intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
+ mutex_unlock(&intel_dp->psr.lock);
}
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
+static void intel_psr_exit(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- if (!dev_priv->psr.active) {
- if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+ if (!intel_dp->psr.active) {
+ if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
val = intel_de_read(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder));
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
return;
}
- if (dev_priv->psr.psr2_enabled) {
- tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+ if (intel_dp->psr.psr2_enabled) {
+ tgl_disallow_dc3co_on_psr2_exit(intel_dp);
val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ EDP_PSR2_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
val &= ~EDP_PSR2_ENABLE;
intel_de_write(dev_priv,
- EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
} else {
val = intel_de_read(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder));
+ EDP_PSR_CTL(intel_dp->psr.transcoder));
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
val &= ~EDP_PSR_ENABLE;
intel_de_write(dev_priv,
- EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}
- dev_priv->psr.active = false;
+ intel_dp->psr.active = false;
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
i915_reg_t psr_status;
u32 psr_status_mask;
- lockdep_assert_held(&dev_priv->psr.lock);
+ lockdep_assert_held(&intel_dp->psr.lock);
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
return;
drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_psr_exit(dev_priv);
+ intel_psr_exit(intel_dp);
- if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+ if (intel_dp->psr.psr2_enabled) {
+ psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+ psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
/* WA 1408330847 */
- if (dev_priv->psr.psr2_sel_fetch_enabled &&
+ if (intel_dp->psr.psr2_sel_fetch_enabled &&
(IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
- if (dev_priv->psr.psr2_enabled)
+ if (intel_dp->psr.psr2_enabled)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
- dev_priv->psr.enabled = false;
+ intel_dp->psr.enabled = false;
}
/**
if (!old_crtc_state->has_psr)
return;
- if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
return;
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
intel_psr_disable_locked(intel_dp);
- mutex_unlock(&dev_priv->psr.lock);
- cancel_work_sync(&dev_priv->psr.work);
- cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
+ mutex_unlock(&intel_dp->psr.lock);
+ cancel_work_sync(&intel_dp->psr.work);
+ cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}
-static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
if (IS_TIGERLAKE(dev_priv))
/*
* Writes to CURSURFLIVE in TGL are causing IOMMU errors and
* So using this workaround until this issue is root caused
* and a better fix is found.
*/
- intel_psr_exit(dev_priv);
+ intel_psr_exit(intel_dp);
else if (INTEL_GEN(dev_priv) >= 9)
/*
* Display WA #0884: skl+
 * but it makes more sense to write to the current active
* pipe.
*/
- intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
else
/*
 * A write to CURSURFLIVE does not cause HW tracking to exit PSR
* on older gens so doing the manual exit instead.
*/
- intel_psr_exit(dev_priv);
+ intel_psr_exit(intel_dp);
}
void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct i915_psr *psr = &dev_priv->psr;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
!crtc_state->enable_psr2_sel_fetch)
return;
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
- crtc_state->psr2_man_track_ctl);
+ for_each_intel_encoder_mask_can_psr(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp->psr.enabled)
+ continue;
+
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
+ crtc_state->psr2_man_track_ctl);
+ }
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
bool enable, psr2_enable;
- if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+ if (!CAN_PSR(intel_dp))
return;
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
enable = crtc_state->has_psr;
psr2_enable = crtc_state->has_psr2;
if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
- psr_force_hw_tracking_exit(dev_priv);
+ psr_force_hw_tracking_exit(intel_dp);
else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
/*
* Activate PSR again after a force exit when enabling
* CRC in older gens
*/
- if (!dev_priv->psr.active &&
- !dev_priv->psr.busy_frontbuffer_bits)
- schedule_work(&dev_priv->psr.work);
+ if (!intel_dp->psr.active &&
+ !intel_dp->psr.busy_frontbuffer_bits)
+ schedule_work(&intel_dp->psr.work);
}
goto unlock;
intel_psr_disable_locked(intel_dp);
if (enable)
- intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
+ intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
- * intel_psr_wait_for_idle - wait for PSR1 to idle
- * @new_crtc_state: new CRTC state
+ * psr_wait_for_idle - wait for PSR1 to idle
+ * @intel_dp: Intel DP
* @out_value: PSR status in case of failure
*
- * This function is expected to be called from pipe_update_start() where it is
- * not expected to race with PSR enable or disable.
- *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
*/
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
- u32 *out_value)
+static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
- return 0;
-
- /* FIXME: Update this for PSR2 if we need to wait for idle */
- if (READ_ONCE(dev_priv->psr.psr2_enabled))
- return 0;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/*
* From bspec: Panel Self Refresh (BDW+)
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
-
return __intel_wait_for_register(&dev_priv->uncore,
- EDP_PSR_STATUS(dev_priv->psr.transcoder),
+ EDP_PSR_STATUS(intel_dp->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
}
-static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ *
+ * This function is expected to be called from pipe_update_start() where it is
+ * not expected to race with PSR enable or disable.
+ */
+void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+ struct intel_encoder *encoder;
+
+ if (!new_crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_can_psr(&dev_priv->drm, encoder,
+ new_crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ u32 psr_status;
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
+ /* PSR1 is enabled: wait for the hardware to reach idle */
+ if (psr_wait_for_idle(intel_dp, &psr_status))
+ drm_err(&dev_priv->drm,
+ "PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
+
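Aside: with the timeout reporting folded into intel_psr_wait_for_idle(), the call site in intel_pipe_update_start() can presumably shrink to a bare call. A minimal sketch of the caller-side change, assuming the caller previously consumed psr_status itself:

	/* before: caller owned the status handling */
	if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
		drm_err(&dev_priv->drm,
			"PSR idle timed out 0x%x, atomic update may fail\n",
			psr_status);

	/* after: errors are logged inside intel_psr.c */
	intel_psr_wait_for_idle(new_crtc_state);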
+static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t reg;
u32 mask;
int err;
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
return false;
- if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+ if (intel_dp->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+ reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
"Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
- mutex_lock(&dev_priv->psr.lock);
- return err == 0 && dev_priv->psr.enabled;
+ mutex_lock(&intel_dp->psr.lock);
+ return err == 0 && intel_dp->psr.enabled;
}
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
return err;
}
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
u32 old_mode;
int ret;
return -EINVAL;
}
- ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+ ret = mutex_lock_interruptible(&intel_dp->psr.lock);
if (ret)
return ret;
- old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
- dev_priv->psr.debug = val;
+ old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+ intel_dp->psr.debug = val;
/*
* Do it right away if it's already enabled, otherwise it will be done
* when enabling the source.
*/
- if (dev_priv->psr.enabled)
- psr_irq_control(dev_priv);
+ if (intel_dp->psr.enabled)
+ psr_irq_control(intel_dp);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
if (old_mode != mode)
ret = intel_psr_fastset_force(dev_priv);
return ret;
}
-static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
- intel_psr_disable_locked(psr->dp);
+ intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
 /* let's make sure that the sink is awake */
- drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static void intel_psr_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work);
+ struct intel_dp *intel_dp =
+ container_of(work, typeof(*intel_dp), psr.work);
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
- if (!dev_priv->psr.enabled)
+ if (!intel_dp->psr.enabled)
goto unlock;
- if (READ_ONCE(dev_priv->psr.irq_aux_error))
- intel_psr_handle_irq(dev_priv);
+ if (READ_ONCE(intel_dp->psr.irq_aux_error))
+ intel_psr_handle_irq(intel_dp);
/*
* We have to make sure PSR is ready for re-enable
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (!__psr_wait_for_idle_locked(dev_priv))
+ if (!__psr_wait_for_idle_locked(intel_dp))
goto unlock;
/*
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
- if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
+ if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
goto unlock;
- intel_psr_activate(dev_priv->psr.dp);
+ intel_psr_activate(intel_dp);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- if (!CAN_PSR(dev_priv))
- return;
+ struct intel_encoder *encoder;
if (origin == ORIGIN_FLIP)
return;
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ for_each_intel_encoder_can_psr(&dev_priv->drm, encoder) {
+ unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
- dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
- if (frontbuffer_bits)
- intel_psr_exit(dev_priv);
+ pipe_frontbuffer_bits &=
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+ intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
- mutex_unlock(&dev_priv->psr.lock);
-}
+ if (pipe_frontbuffer_bits)
+ intel_psr_exit(intel_dp);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
+}
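Aside: the per-encoder walks above rely on for_each_intel_encoder_can_psr() and its encoder-mask variant, introduced elsewhere in this series. A plausible shape for the helper, under the assumption that a predicate such as intel_encoder_can_psr() filters on PSR capability of the attached DP:

	/* hypothetical sketch, not from this patch */
	#define for_each_intel_encoder_can_psr(dev, intel_encoder) \
		for_each_intel_encoder(dev, intel_encoder) \
			for_each_if(intel_encoder_can_psr(intel_encoder))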
/*
 * Once we completely rely on PSR2 S/W tracking in the future,
* intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
* accordingly in future.
*/
static void
-tgl_dc3co_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits, enum fb_op_origin origin)
+tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
{
- mutex_lock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
- if (!dev_priv->psr.dc3co_enabled)
+ if (!intel_dp->psr.dc3co_enabled)
goto unlock;
- if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+ if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
goto unlock;
/*
 * when the delayed work is scheduled, the display has been idle.
*/
if (!(frontbuffer_bits &
- INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
goto unlock;
- tgl_psr2_enable_dc3co(dev_priv);
- mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
- dev_priv->psr.dc3co_exit_delay);
+ tgl_psr2_enable_dc3co(intel_dp);
+ mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+ intel_dp->psr.dc3co_exit_delay);
unlock:
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_unlock(&intel_dp->psr.lock);
}
/**
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- if (!CAN_PSR(dev_priv))
- return;
+ struct intel_encoder *encoder;
- if (origin == ORIGIN_FLIP) {
- tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
- return;
- }
+ for_each_intel_encoder_can_psr(&dev_priv->drm, encoder) {
+ unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
+ if (origin == ORIGIN_FLIP) {
+ tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
+ continue;
+ }
+
+ mutex_lock(&intel_dp->psr.lock);
+ if (!intel_dp->psr.enabled) {
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
- dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+ pipe_frontbuffer_bits &=
+ INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+ intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
- /* By definition flush = invalidate + flush */
- if (frontbuffer_bits)
- psr_force_hw_tracking_exit(dev_priv);
+ /* By definition flush = invalidate + flush */
+ if (pipe_frontbuffer_bits)
+ psr_force_hw_tracking_exit(intel_dp);
- if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- schedule_work(&dev_priv->psr.work);
- mutex_unlock(&dev_priv->psr.lock);
+ if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
+ schedule_work(&intel_dp->psr.work);
+ mutex_unlock(&intel_dp->psr.lock);
+ }
}
/**
* intel_psr_init - Init basic PSR work and mutex.
- * @dev_priv: i915 device private
+ * @intel_dp: Intel DP
*
- * This function is called only once at driver load to initialize basic
- * PSR stuff.
+ * This function is called after the connector has been initialized
+ * (connector initialization handles reading the connector capabilities)
+ * and sets up the basic PSR state for each DP encoder.
*/
-void intel_psr_init(struct drm_i915_private *dev_priv)
+void intel_psr_init(struct intel_dp *intel_dp)
{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
if (!HAS_PSR(dev_priv))
return;
- if (!dev_priv->psr.sink_support)
+ if (!intel_dp->psr.sink_support)
return;
+ /*
+ * HSW spec explicitly says PSR is tied to port A.
+ * BDW+ platforms have an instance of PSR registers per transcoder,
+ * but BDW, GEN9 and GEN11 are not validated by the HW team on any
+ * transcoder other than the eDP one, so for now PSR stays hardcoded
+ * to PORT_A on those platforms.
+ * GEN12 supports an instance of PSR registers per transcoder.
+ */
+ if (INTEL_GEN(dev_priv) < 12 && dig_port->base.port != PORT_A) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Port not supported\n");
+ return;
+ }
+
+ intel_dp->psr.source_support = true;
+
if (IS_HASWELL(dev_priv))
/*
 * HSW doesn't have PSR registers in the same space as the transcoder
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
- dev_priv->psr.link_standby = false;
+ intel_dp->psr.link_standby = false;
else if (INTEL_GEN(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
- dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+ intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
- INIT_WORK(&dev_priv->psr.work, intel_psr_work);
- INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
- mutex_init(&dev_priv->psr.lock);
+ INIT_WORK(&intel_dp->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
+ mutex_init(&intel_dp->psr.lock);
}
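Aside: with sink_support joined by the new source_support flag set in intel_psr_init(), CAN_PSR() presumably becomes a per-DP check. A sketch under that assumption:

	/* assumed reworked form of the macro in intel_display_types.h */
	#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
				   (intel_dp)->psr.source_support)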
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct drm_dp_aux *aux = &intel_dp->aux;
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_psr *psr = &dev_priv->psr;
+ struct intel_psr *psr = &intel_dp->psr;
u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
- if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
return;
mutex_lock(&psr->lock);
- if (!psr->enabled || psr->dp != intel_dp)
+ if (!psr->enabled)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
bool intel_psr_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
bool ret;
- if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
return false;
- mutex_lock(&dev_priv->psr.lock);
- ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
- mutex_unlock(&dev_priv->psr.lock);
+ mutex_lock(&intel_dp->psr.lock);
+ ret = intel_dp->psr.enabled;
+ mutex_unlock(&intel_dp->psr.lock);
return ret;
}