 	intel_sbi_init(dev_priv);
 	vlv_iosf_sb_init(dev_priv);
 	mutex_init(&dev_priv->sb_lock);
-	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
 	i915_memcpy_init_early(dev_priv);
 	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

 	vlv_suspend_cleanup(dev_priv);
 	i915_workqueues_cleanup(dev_priv);
-	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
 	mutex_destroy(&dev_priv->sb_lock);
 	vlv_iosf_sb_fini(dev_priv);
 	intel_sbi_fini(dev_priv);
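
With these two hunks, i915_driver_early_probe() and i915_driver_late_release() stop managing the QoS request themselves; its lifetime moves next to the VLV/CHV sideband lock (vlv_iosf_sb.lock), into vlv_iosf_sb_init()/vlv_iosf_sb_fini() further down. For reference, the cpu_latency_qos_*() calls used throughout this patch follow the usual add/update/remove lifecycle from linux/pm_qos.h. A minimal standalone sketch (qos_lifecycle_demo() is a made-up name, not driver code):

#include <linux/pm_qos.h>

static struct pm_qos_request demo_req;

static void qos_lifecycle_demo(void)
{
	/* Register the request; PM_QOS_DEFAULT_VALUE adds no constraint yet. */
	cpu_latency_qos_add_request(&demo_req, PM_QOS_DEFAULT_VALUE);

	/* Tighten to a 0 usec wakeup bound: CPUs stay out of deep C-states. */
	cpu_latency_qos_update_request(&demo_req, 0);

	/* Relax back to the default, then unregister on teardown. */
	cpu_latency_qos_update_request(&demo_req, PM_QOS_DEFAULT_VALUE);
	cpu_latency_qos_remove_request(&demo_req);
}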
 	/* VLV/CHV IOSF sideband */
 	struct {
 		struct mutex lock; /* protect sideband access */
+		struct pm_qos_request qos; /* limit CPU C-states during punit access */
 	} vlv_iosf_sb;

 	/* Sideband mailbox protection */
 	struct mutex sb_lock;
-	struct pm_qos_request sb_qos;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask;
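
Net effect on struct drm_i915_private: the request is no longer a stray top-level member but lives with the rest of the VLV/CHV sideband state it serves. The resulting layout, pieced together from the two hunks above:

	/* VLV/CHV IOSF sideband */
	struct {
		struct mutex lock; /* protect sideband access */
		struct pm_qos_request qos; /* limit CPU C-states during punit access */
	} vlv_iosf_sb;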
 	 * to the Valleyview P-unit and not all sideband communications.
 	 */
 	if (IS_VALLEYVIEW(i915)) {
-		cpu_latency_qos_update_request(&i915->sb_qos, 0);
+		cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos, 0);
 		on_each_cpu(ping, NULL, 1);
 	}
 }
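
A note on the idiom here: updating the QoS request only stops CPUs from entering deep C-states from now on; it does not wake CPUs that are already sleeping. That is what on_each_cpu() is for. In the upstream driver ping() is, as far as I can tell, an empty IPI callback; the cross-call itself kicks every CPU, and wait=1 means the new bound has fully taken effect by the time the helper returns:

static void ping(void *info)
{
	/* Deliberately empty: the IPI alone pops each CPU out of its C-state. */
}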
 static void __vlv_punit_put(struct drm_i915_private *i915)
 {
 	if (IS_VALLEYVIEW(i915))
-		cpu_latency_qos_update_request(&i915->sb_qos,
+		cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos,
 					       PM_QOS_DEFAULT_VALUE);
 	iosf_mbi_punit_release();
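
For context (not part of this patch): these two helpers are the inner halves of the punit bracket. If memory serves, the public vlv_punit_get()/vlv_punit_put() in vlv_sideband.c pair them with sb_lock, roughly:

void vlv_punit_get(struct drm_i915_private *i915)
{
	__vlv_punit_get(i915);		/* acquire punit, cap C-states on VLV */
	mutex_lock(&i915->sb_lock);
}

void vlv_punit_put(struct drm_i915_private *i915)
{
	mutex_unlock(&i915->sb_lock);
	__vlv_punit_put(i915);		/* lift the cap, release punit */
}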
 void vlv_iosf_sb_init(struct drm_i915_private *i915)
 {
 	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
 		mutex_init(&i915->vlv_iosf_sb.lock);
+
+	if (IS_VALLEYVIEW(i915))
+		cpu_latency_qos_add_request(&i915->vlv_iosf_sb.qos, PM_QOS_DEFAULT_VALUE);
 }

 void vlv_iosf_sb_fini(struct drm_i915_private *i915)
 {
+	if (IS_VALLEYVIEW(i915))
+		cpu_latency_qos_remove_request(&i915->vlv_iosf_sb.qos);
+
 	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
 		mutex_destroy(&i915->vlv_iosf_sb.lock);
 }
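
Note the deliberately asymmetric guards: the lock is needed on both VLV and CHV, while the QoS request is VLV-only, matching the IS_VALLEYVIEW() checks in __vlv_punit_get()/__vlv_punit_put() above. Teardown also mirrors init in reverse order: remove the request first, destroy the mutex last. A caller-side sketch of the intended bracketing once this lands (read_gpu_freq_status() is a hypothetical wrapper; vlv_punit_read() and PUNIT_REG_GPU_FREQ_STS are existing driver names):

static u32 read_gpu_freq_status(struct drm_i915_private *i915)
{
	u32 val;

	vlv_punit_get(i915);	/* caps C-states and takes sb_lock */
	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	vlv_punit_put(i915);	/* drops sb_lock, restores the default bound */

	return val;
}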