/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

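/*
 * Usage sketch (illustrative only): the reset and init macros are used as a
 * pair when (re)programming an interrupt unit, with the unit name pasted
 * onto the IMR/IER/IIR register names, e.g. for the display engine
 * ("DE" -> DEIMR/DEIER/DEIIR):
 *
 *	GEN3_IRQ_RESET(DE);
 *	...
 *	GEN3_IRQ_INIT(DE, ~display_mask, display_mask | extra_mask);
 *
 * where display_mask/extra_mask stand for whatever the caller wants
 * unmasked in IMR and enabled in IER.
 */
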
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that read-modify-write cycles
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

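/*
 * Example (illustrative): enabling CRT hotplug detection while leaving all
 * other bits untouched, and then disabling it again:
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN, 0);
 */
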
static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

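/*
 * The interrupt_mask/enabled_irq_mask convention reads as "of the bits in
 * interrupt_mask, unmask those also set in enabled_irq_mask and mask the
 * rest". The usual enable/disable wrappers (defined in intel_drv.h) thus
 * reduce to:
 *
 *	ilk_enable_display_irq(dev_priv, bits)
 *		-> ilk_update_display_irq(dev_priv, bits, bits);
 *	ilk_disable_display_irq(dev_priv, bits)
 *		-> ilk_update_display_irq(dev_priv, bits, 0);
 */
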
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      u32 interrupt_mask,
			      u32 enabled_irq_mask)
{
	u32 new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

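/*
 * Example (illustrative): enabling just the RPS up/down threshold events,
 * with dev_priv->irq_lock held, sets those bits in pm_ier and unmasks them
 * in pm_imr:
 *
 *	gen6_enable_pm_irq(dev_priv, GEN6_PM_RP_UP_THRESHOLD |
 *				     GEN6_PM_RP_DOWN_THRESHOLD);
 *
 * and gen6_disable_pm_irq() with the same mask undoes both steps.
 */
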
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       . \hs/ .      \hs/          \hs/          \hs/ .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

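/*
 * Worked example of the composition above (made-up values): with
 * high1 = 0x12, low = 0x34 after shifting, and the pixel counter already
 * past vbl_start, the result is ((0x12 << 8 | 0x34) + 1) & 0xffffff =
 * 0x1235, i.e. the "+1" rolls the cooked counter forward as soon as the
 * pipe enters vblank, even though the hardware frame counter itself has
 * not incremented yet.
 */
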
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT, or there are
 * issues with scanline register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

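/*
 * Worked example of the conversion above (made-up values, assuming the
 * timestamp delta is in usecs and crtc_clock is in kHz): with
 * clock = 148500, htotal = 2200 and a delta of 7407 usecs,
 * scanline = 7407 * 148500 / (1000 * 2200) = 500 lines since the last
 * start of vblank, which is then rebased to vblank_start and wrapped
 * modulo vtotal.
 */
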
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *engine)
{
	const u32 seqno = intel_engine_get_seqno(engine);
	struct i915_request *rq = NULL;
	struct task_struct *tsk = NULL;
	struct intel_wait *wait;

	if (unlikely(!engine->breadcrumbs.irq_armed))
		return;

	rcu_read_lock();

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/*
		 * We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(seqno, wait->seqno)) {
			struct i915_request *waiter = wait->request;

			if (waiter &&
			    !i915_request_signaled(waiter) &&
			    intel_wait_check_request(wait, waiter))
				rq = i915_request_get(waiter);

			tsk = wait->tsk;
		}

		engine->breadcrumbs.irq_count++;
	} else {
		if (engine->breadcrumbs.irq_armed)
			__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		spin_lock(&rq->lock);
		dma_fence_signal_locked(&rq->fence);
		GEM_BUG_ON(!i915_request_completed(rq));
		spin_unlock(&rq->lock);

		i915_request_put(rq);
	}

	if (tsk && tsk->state & TASK_NORMAL)
		wake_up_process(tsk);

	rcu_read_unlock();

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

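/*
 * A note on the fixed-point comparison above: it is, in effect,
 *
 *	cycles_busy * (1000 * 100 << 8) > cycles_elapsed_scaled * threshold
 *
 * i.e. "was the busiest engine above threshold% of the elapsed EI window".
 * Both sides are pre-scaled into the same unit so that no division is
 * needed in the interrupt path.
 */
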
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		notify_ring(engine);
		tasklet |= USES_GUC_SUBMISSION(engine->i915);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}

static void gen8_gt_irq_ack(struct drm_i915_private *i915,
			    u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = i915->regs;

#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VCS2_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2]))
			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}

static void gen8_gt_irq_handler(struct drm_i915_private *i915,
				u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gen8_cs_irq_handler(i915->engine[RCS],
				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[BCS],
				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS],
				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS2],
				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gen8_cs_irq_handler(i915->engine[VECS],
				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(i915, gt_iir[2]);
		gen9_guc_irq_handler(i915, gt_iir[2]);
	}
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & ICP_DDIA_HPD_LONG_DETECT;
	case HPD_PORT_B:
		return val & ICP_DDIB_HPD_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

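/*
 * Typical call pattern (illustrative, mirroring i9xx_hpd_irq_handler()
 * below): zero the masks, accumulate, then dispatch:
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg,
 *			   hpd_status_i915, i9xx_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */
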
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	u32 crcs[5];

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	crcs[0] = crc0;
	crcs[1] = crc1;
	crcs[2] = crc2;
	crcs[3] = crc3;
	crcs[4] = crc4;
	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
		intel_guc_to_host_event_handler(&dev_priv->guc);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

2089 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2091 struct drm_device *dev = arg;
2092 struct drm_i915_private *dev_priv = to_i915(dev);
2093 irqreturn_t ret = IRQ_NONE;
2095 if (!intel_irqs_enabled(dev_priv))
2098 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2099 disable_rpm_wakeref_asserts(dev_priv);
2102 u32 iir, gt_iir, pm_iir;
2103 u32 pipe_stats[I915_MAX_PIPES] = {};
2104 u32 hotplug_status = 0;
2107 gt_iir = I915_READ(GTIIR);
2108 pm_iir = I915_READ(GEN6_PMIIR);
2109 iir = I915_READ(VLV_IIR);
2111 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2117 * Theory on interrupt generation, based on empirical evidence:
2119 * x = ((VLV_IIR & VLV_IER) ||
2120 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2121 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2123 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2124 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2125 * guarantee the CPU interrupt will be raised again even if we
2126 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2127 * bits this time around.
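/*
 * Worked example of the edge requirement (illustrative): suppose a new
 * PIPESTAT event sets a VLV_IIR bit while we are still processing, so
 * VLV_IIR is non-zero when we finish. Since VLV_IER and
 * MASTER_INTERRUPT_ENABLE are both cleared below for the duration of the
 * loop body, 'x' is forced to 0 the whole time; restoring them at the end
 * then yields the 0->1 edge needed to raise the CPU interrupt again.
 */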
2129 I915_WRITE(VLV_MASTER_IER, 0);
2130 ier = I915_READ(VLV_IER);
2131 I915_WRITE(VLV_IER, 0);
2133 if (gt_iir)
2134 I915_WRITE(GTIIR, gt_iir);
2135 if (pm_iir)
2136 I915_WRITE(GEN6_PMIIR, pm_iir);
2138 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2139 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2141 /* Call regardless, as some status bits might not be
2142 * signalled in iir */
2143 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2145 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2146 I915_LPE_PIPE_B_INTERRUPT))
2147 intel_lpe_audio_irq_handler(dev_priv);
2150 * VLV_IIR is single buffered, and reflects the level
2151 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2154 I915_WRITE(VLV_IIR, iir);
2156 I915_WRITE(VLV_IER, ier);
2157 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2159 if (gt_iir)
2160 snb_gt_irq_handler(dev_priv, gt_iir);
2161 if (pm_iir)
2162 gen6_rps_irq_handler(dev_priv, pm_iir);
2164 if (hotplug_status)
2165 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2167 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2170 enable_rpm_wakeref_asserts(dev_priv);
2172 return ret;
2175 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2177 struct drm_device *dev = arg;
2178 struct drm_i915_private *dev_priv = to_i915(dev);
2179 irqreturn_t ret = IRQ_NONE;
2181 if (!intel_irqs_enabled(dev_priv))
2182 return IRQ_NONE;
2184 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2185 disable_rpm_wakeref_asserts(dev_priv);
2187 do {
2188 u32 master_ctl, iir;
2189 u32 pipe_stats[I915_MAX_PIPES] = {};
2190 u32 hotplug_status = 0;
2191 u32 gt_iir[4];
2192 u32 ier = 0;
2194 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2195 iir = I915_READ(VLV_IIR);
2197 if (master_ctl == 0 && iir == 0)
2198 break;
2200 ret = IRQ_HANDLED;
2203 * Theory on interrupt generation, based on empirical evidence:
2205 * x = ((VLV_IIR & VLV_IER) ||
2206 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2207 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2209 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2210 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2211 * guarantee the CPU interrupt will be raised again even if we
2212 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2213 * bits this time around.
2215 I915_WRITE(GEN8_MASTER_IRQ, 0);
2216 ier = I915_READ(VLV_IER);
2217 I915_WRITE(VLV_IER, 0);
2219 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2221 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2222 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2224 /* Call regardless, as some status bits might not be
2225 * signalled in iir */
2226 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2228 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2229 I915_LPE_PIPE_B_INTERRUPT |
2230 I915_LPE_PIPE_C_INTERRUPT))
2231 intel_lpe_audio_irq_handler(dev_priv);
2234 * VLV_IIR is single buffered, and reflects the level
2235 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2238 I915_WRITE(VLV_IIR, iir);
2240 I915_WRITE(VLV_IER, ier);
2241 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2243 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2245 if (hotplug_status)
2246 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2248 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2251 enable_rpm_wakeref_asserts(dev_priv);
2253 return ret;
2256 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2257 u32 hotplug_trigger,
2258 const u32 hpd[HPD_NUM_PINS])
2260 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2263 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2264 * unless we touch the hotplug register, even if hotplug_trigger is
2265 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2268 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2269 if (!hotplug_trigger) {
2270 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2271 PORTD_HOTPLUG_STATUS_MASK |
2272 PORTC_HOTPLUG_STATUS_MASK |
2273 PORTB_HOTPLUG_STATUS_MASK;
2274 dig_hotplug_reg &= ~mask;
2277 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2278 if (!hotplug_trigger)
2279 return;
2281 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2282 dig_hotplug_reg, hpd,
2283 pch_port_hotplug_long_detect);
2285 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2288 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2290 int pipe;
2291 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2293 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2295 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2296 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2297 SDE_AUDIO_POWER_SHIFT);
2298 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2302 if (pch_iir & SDE_AUX_MASK)
2303 dp_aux_irq_handler(dev_priv);
2305 if (pch_iir & SDE_GMBUS)
2306 gmbus_irq_handler(dev_priv);
2308 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2309 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2311 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2312 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2314 if (pch_iir & SDE_POISON)
2315 DRM_ERROR("PCH poison interrupt\n");
2317 if (pch_iir & SDE_FDI_MASK)
2318 for_each_pipe(dev_priv, pipe)
2319 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2320 pipe_name(pipe),
2321 I915_READ(FDI_RX_IIR(pipe)));
2323 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2324 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2326 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2327 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2329 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2330 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2332 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2333 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2336 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2338 u32 err_int = I915_READ(GEN7_ERR_INT);
2339 enum pipe pipe;
2341 if (err_int & ERR_INT_POISON)
2342 DRM_ERROR("Poison interrupt\n");
2344 for_each_pipe(dev_priv, pipe) {
2345 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2346 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2348 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2349 if (IS_IVYBRIDGE(dev_priv))
2350 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2351 else
2352 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2356 I915_WRITE(GEN7_ERR_INT, err_int);
2359 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2361 u32 serr_int = I915_READ(SERR_INT);
2362 enum pipe pipe;
2364 if (serr_int & SERR_INT_POISON)
2365 DRM_ERROR("PCH poison interrupt\n");
2367 for_each_pipe(dev_priv, pipe)
2368 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2369 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2371 I915_WRITE(SERR_INT, serr_int);
2374 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2376 int pipe;
2377 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2379 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2381 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2382 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2383 SDE_AUDIO_POWER_SHIFT_CPT);
2384 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2385 port_name(port));
2388 if (pch_iir & SDE_AUX_MASK_CPT)
2389 dp_aux_irq_handler(dev_priv);
2391 if (pch_iir & SDE_GMBUS_CPT)
2392 gmbus_irq_handler(dev_priv);
2394 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2395 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2397 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2398 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2400 if (pch_iir & SDE_FDI_MASK_CPT)
2401 for_each_pipe(dev_priv, pipe)
2402 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2403 pipe_name(pipe),
2404 I915_READ(FDI_RX_IIR(pipe)));
2406 if (pch_iir & SDE_ERROR_CPT)
2407 cpt_serr_int_handler(dev_priv);
2410 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2412 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2413 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
2414 u32 pin_mask = 0, long_mask = 0;
2416 if (ddi_hotplug_trigger) {
2417 u32 dig_hotplug_reg;
2419 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2420 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2422 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2423 ddi_hotplug_trigger,
2424 dig_hotplug_reg, hpd_icp,
2425 icp_ddi_port_hotplug_long_detect);
2428 if (tc_hotplug_trigger) {
2429 u32 dig_hotplug_reg;
2431 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2432 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2434 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2435 tc_hotplug_trigger,
2436 dig_hotplug_reg, hpd_icp,
2437 icp_tc_port_hotplug_long_detect);
2440 if (pin_mask)
2441 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2443 if (pch_iir & SDE_GMBUS_ICP)
2444 gmbus_irq_handler(dev_priv);
2447 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2449 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2450 ~SDE_PORTE_HOTPLUG_SPT;
2451 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2452 u32 pin_mask = 0, long_mask = 0;
2454 if (hotplug_trigger) {
2455 u32 dig_hotplug_reg;
2457 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2458 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2460 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2461 hotplug_trigger, dig_hotplug_reg, hpd_spt,
2462 spt_port_hotplug_long_detect);
2465 if (hotplug2_trigger) {
2466 u32 dig_hotplug_reg;
2468 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2469 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2471 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2472 hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2473 spt_port_hotplug2_long_detect);
2476 if (pin_mask)
2477 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2479 if (pch_iir & SDE_GMBUS_CPT)
2480 gmbus_irq_handler(dev_priv);
2483 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2484 u32 hotplug_trigger,
2485 const u32 hpd[HPD_NUM_PINS])
2487 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2489 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2490 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2492 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2493 dig_hotplug_reg, hpd,
2494 ilk_port_hotplug_long_detect);
2496 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2499 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2500 u32 de_iir)
2502 enum pipe pipe;
2503 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2505 if (hotplug_trigger)
2506 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2508 if (de_iir & DE_AUX_CHANNEL_A)
2509 dp_aux_irq_handler(dev_priv);
2511 if (de_iir & DE_GSE)
2512 intel_opregion_asle_intr(dev_priv);
2514 if (de_iir & DE_POISON)
2515 DRM_ERROR("Poison interrupt\n");
2517 for_each_pipe(dev_priv, pipe) {
2518 if (de_iir & DE_PIPE_VBLANK(pipe))
2519 drm_handle_vblank(&dev_priv->drm, pipe);
2521 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2522 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2524 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2525 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2528 /* check event from PCH */
2529 if (de_iir & DE_PCH_EVENT) {
2530 u32 pch_iir = I915_READ(SDEIIR);
2532 if (HAS_PCH_CPT(dev_priv))
2533 cpt_irq_handler(dev_priv, pch_iir);
2534 else
2535 ibx_irq_handler(dev_priv, pch_iir);
2537 /* should clear PCH hotplug event before clearing CPU irq */
2538 I915_WRITE(SDEIIR, pch_iir);
2541 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2542 ironlake_rps_change_irq_handler(dev_priv);
2545 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2546 u32 de_iir)
2548 enum pipe pipe;
2549 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2551 if (hotplug_trigger)
2552 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2554 if (de_iir & DE_ERR_INT_IVB)
2555 ivb_err_int_handler(dev_priv);
2557 if (de_iir & DE_EDP_PSR_INT_HSW) {
2558 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2560 intel_psr_irq_handler(dev_priv, psr_iir);
2561 I915_WRITE(EDP_PSR_IIR, psr_iir);
2564 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2565 dp_aux_irq_handler(dev_priv);
2567 if (de_iir & DE_GSE_IVB)
2568 intel_opregion_asle_intr(dev_priv);
2570 for_each_pipe(dev_priv, pipe) {
2571 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2572 drm_handle_vblank(&dev_priv->drm, pipe);
2575 /* check event from PCH */
2576 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2577 u32 pch_iir = I915_READ(SDEIIR);
2579 cpt_irq_handler(dev_priv, pch_iir);
2581 /* clear PCH hotplug event before clearing CPU irq */
2582 I915_WRITE(SDEIIR, pch_iir);
2587 * To handle irqs with the minimum potential races with fresh interrupts, we:
2588 * 1 - Disable Master Interrupt Control.
2589 * 2 - Find the source(s) of the interrupt.
2590 * 3 - Clear the Interrupt Identity bits (IIR).
2591 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2592 * 5 - Re-enable Master Interrupt Control.
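/*
 * Mapped onto the registers used below, the sequence is roughly (sketch
 * only; xxIIR stands for each of GTIIR, DEIIR and GEN6_PMIIR):
 *
 *	de_ier = I915_READ(DEIER);
 *	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);	// step 1
 *	iir = I915_READ(xxIIR);					// step 2
 *	I915_WRITE(xxIIR, iir);					// step 3
 *	...handle iir...					// step 4
 *	I915_WRITE(DEIER, de_ier);				// step 5
 */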
2594 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2596 struct drm_device *dev = arg;
2597 struct drm_i915_private *dev_priv = to_i915(dev);
2598 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2599 irqreturn_t ret = IRQ_NONE;
2601 if (!intel_irqs_enabled(dev_priv))
2602 return IRQ_NONE;
2604 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2605 disable_rpm_wakeref_asserts(dev_priv);
2607 /* disable master interrupt before clearing iir */
2608 de_ier = I915_READ(DEIER);
2609 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2611 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2612 * interrupts will be stored on its back queue, and then we'll be
2613 * able to process them after we restore SDEIER (as soon as we restore
2614 * it, we'll get an interrupt if SDEIIR still has something to process
2615 * due to its back queue). */
2616 if (!HAS_PCH_NOP(dev_priv)) {
2617 sde_ier = I915_READ(SDEIER);
2618 I915_WRITE(SDEIER, 0);
2621 /* Find, clear, then process each source of interrupt */
2623 gt_iir = I915_READ(GTIIR);
2624 if (gt_iir) {
2625 I915_WRITE(GTIIR, gt_iir);
2626 ret = IRQ_HANDLED;
2627 if (INTEL_GEN(dev_priv) >= 6)
2628 snb_gt_irq_handler(dev_priv, gt_iir);
2629 else
2630 ilk_gt_irq_handler(dev_priv, gt_iir);
2633 de_iir = I915_READ(DEIIR);
2634 if (de_iir) {
2635 I915_WRITE(DEIIR, de_iir);
2636 ret = IRQ_HANDLED;
2637 if (INTEL_GEN(dev_priv) >= 7)
2638 ivb_display_irq_handler(dev_priv, de_iir);
2639 else
2640 ilk_display_irq_handler(dev_priv, de_iir);
2643 if (INTEL_GEN(dev_priv) >= 6) {
2644 u32 pm_iir = I915_READ(GEN6_PMIIR);
2645 if (pm_iir) {
2646 I915_WRITE(GEN6_PMIIR, pm_iir);
2647 ret = IRQ_HANDLED;
2648 gen6_rps_irq_handler(dev_priv, pm_iir);
2652 I915_WRITE(DEIER, de_ier);
2653 if (!HAS_PCH_NOP(dev_priv))
2654 I915_WRITE(SDEIER, sde_ier);
2656 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2657 enable_rpm_wakeref_asserts(dev_priv);
2659 return ret;
2662 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2663 u32 hotplug_trigger,
2664 const u32 hpd[HPD_NUM_PINS])
2666 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2668 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2669 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2671 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2672 dig_hotplug_reg, hpd,
2673 bxt_port_hotplug_long_detect);
2675 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2678 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2680 u32 pin_mask = 0, long_mask = 0;
2681 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2682 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2684 if (trigger_tc) {
2685 u32 dig_hotplug_reg;
2687 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2688 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2690 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2691 dig_hotplug_reg, hpd_gen11,
2692 gen11_port_hotplug_long_detect);
2695 if (trigger_tbt) {
2696 u32 dig_hotplug_reg;
2698 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2699 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2701 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2702 dig_hotplug_reg, hpd_gen11,
2703 gen11_port_hotplug_long_detect);
2706 if (pin_mask)
2707 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2708 else
2709 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2713 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2715 irqreturn_t ret = IRQ_NONE;
2716 u32 iir;
2717 enum pipe pipe;
2719 if (master_ctl & GEN8_DE_MISC_IRQ) {
2720 iir = I915_READ(GEN8_DE_MISC_IIR);
2721 if (iir) {
2722 bool found = false;
2724 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2725 ret = IRQ_HANDLED;
2727 if (iir & GEN8_DE_MISC_GSE) {
2728 intel_opregion_asle_intr(dev_priv);
2729 found = true;
2732 if (iir & GEN8_DE_EDP_PSR) {
2733 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2735 intel_psr_irq_handler(dev_priv, psr_iir);
2736 I915_WRITE(EDP_PSR_IIR, psr_iir);
2737 found = true;
2740 if (!found)
2741 DRM_ERROR("Unexpected DE Misc interrupt\n");
2743 else
2744 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2747 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2748 iir = I915_READ(GEN11_DE_HPD_IIR);
2749 if (iir) {
2750 I915_WRITE(GEN11_DE_HPD_IIR, iir);
2751 ret = IRQ_HANDLED;
2752 gen11_hpd_irq_handler(dev_priv, iir);
2753 } else {
2754 DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
2758 if (master_ctl & GEN8_DE_PORT_IRQ) {
2759 iir = I915_READ(GEN8_DE_PORT_IIR);
2760 if (iir) {
2761 u32 tmp_mask;
2762 bool found = false;
2764 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2765 ret = IRQ_HANDLED;
2767 tmp_mask = GEN8_AUX_CHANNEL_A;
2768 if (INTEL_GEN(dev_priv) >= 9)
2769 tmp_mask |= GEN9_AUX_CHANNEL_B |
2770 GEN9_AUX_CHANNEL_C |
2773 if (INTEL_GEN(dev_priv) >= 11)
2774 tmp_mask |= ICL_AUX_CHANNEL_E;
2776 if (IS_CNL_WITH_PORT_F(dev_priv) ||
2777 INTEL_GEN(dev_priv) >= 11)
2778 tmp_mask |= CNL_AUX_CHANNEL_F;
2780 if (iir & tmp_mask) {
2781 dp_aux_irq_handler(dev_priv);
2782 found = true;
2785 if (IS_GEN9_LP(dev_priv)) {
2786 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2787 if (tmp_mask) {
2788 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2789 hpd_bxt);
2790 found = true;
2791 }
2792 } else if (IS_BROADWELL(dev_priv)) {
2793 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2794 if (tmp_mask) {
2795 ilk_hpd_irq_handler(dev_priv,
2796 tmp_mask, hpd_bdw);
2797 found = true;
2798 }
2801 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2802 gmbus_irq_handler(dev_priv);
2803 found = true;
2806 if (!found)
2807 DRM_ERROR("Unexpected DE Port interrupt\n");
2809 else
2810 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2813 for_each_pipe(dev_priv, pipe) {
2814 u32 fault_errors;
2816 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2817 continue;
2819 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2820 if (!iir) {
2821 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2822 continue;
2823 }
2825 ret = IRQ_HANDLED;
2826 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2828 if (iir & GEN8_PIPE_VBLANK)
2829 drm_handle_vblank(&dev_priv->drm, pipe);
2831 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2832 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2834 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2835 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2837 fault_errors = iir;
2838 if (INTEL_GEN(dev_priv) >= 9)
2839 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2840 else
2841 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2843 if (fault_errors)
2844 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2845 pipe_name(pipe),
2846 fault_errors);
2849 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2850 master_ctl & GEN8_DE_PCH_IRQ) {
2852 * FIXME(BDW): Assume for now that the new interrupt handling
2853 * scheme also closed the SDE interrupt handling race we've seen
2854 * on older pch-split platforms. But this needs testing.
2856 iir = I915_READ(SDEIIR);
2857 if (iir) {
2858 I915_WRITE(SDEIIR, iir);
2859 ret = IRQ_HANDLED;
2861 if (HAS_PCH_ICP(dev_priv))
2862 icp_irq_handler(dev_priv, iir);
2863 else if (HAS_PCH_SPT(dev_priv) ||
2864 HAS_PCH_KBP(dev_priv) ||
2865 HAS_PCH_CNP(dev_priv))
2866 spt_irq_handler(dev_priv, iir);
2868 cpt_irq_handler(dev_priv, iir);
2871 * Like on previous PCH there seems to be something
2872 * fishy going on with forwarding PCH interrupts.
2874 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2878 return ret;
2881 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2883 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2886 * Now with master disabled, get a sample of level indications
2887 * for this interrupt. Indications will be cleared on related acks.
2888 * New indications can and will light up during processing,
2889 * and will generate new interrupt after enabling master.
2891 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2894 static inline void gen8_master_intr_enable(void __iomem * const regs)
2896 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
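/*
 * The disable/enable pair is meant to bracket one pass of the handler,
 * as gen8_irq_handler() below does:
 *
 *	master_ctl = gen8_master_intr_disable(regs);
 *	if (!master_ctl) {
 *		gen8_master_intr_enable(regs);
 *		return IRQ_NONE;	// not our interrupt
 *	}
 *	...ack and handle master_ctl...
 *	gen8_master_intr_enable(regs);
 */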
2899 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2901 struct drm_i915_private *dev_priv = to_i915(arg);
2902 void __iomem * const regs = dev_priv->regs;
2903 u32 master_ctl;
2904 u32 gt_iir[4];
2906 if (!intel_irqs_enabled(dev_priv))
2907 return IRQ_NONE;
2909 master_ctl = gen8_master_intr_disable(regs);
2910 if (!master_ctl) {
2911 gen8_master_intr_enable(regs);
2912 return IRQ_NONE;
2913 }
2915 /* Find, clear, then process each source of interrupt */
2916 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2918 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2919 if (master_ctl & ~GEN8_GT_IRQS) {
2920 disable_rpm_wakeref_asserts(dev_priv);
2921 gen8_de_irq_handler(dev_priv, master_ctl);
2922 enable_rpm_wakeref_asserts(dev_priv);
2925 gen8_master_intr_enable(regs);
2927 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2929 return IRQ_HANDLED;
2933 gen11_gt_engine_identity(struct drm_i915_private * const i915,
2934 const unsigned int bank, const unsigned int bit)
2936 void __iomem * const regs = i915->regs;
2937 u32 timeout_ts;
2938 u32 ident;
2940 lockdep_assert_held(&i915->irq_lock);
2942 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2945 * NB: Specs do not specify how long to spin wait,
2946 * so we do ~100us as an educated guess.
2948 timeout_ts = (local_clock() >> 10) + 100;
2949 do {
2950 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2951 } while (!(ident & GEN11_INTR_DATA_VALID) &&
2952 !time_after32(local_clock() >> 10, timeout_ts));
2954 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
2955 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
2956 bank, bit, ident);
2957 return 0;
2960 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
2961 GEN11_INTR_DATA_VALID);
2963 return ident;
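/*
 * local_clock() returns nanoseconds, so the >> 10 above is a cheap
 * approximation of a conversion to microseconds (divide by 1024 instead
 * of 1000), making timeout_ts roughly "now + 100us". The generic shape of
 * this bounded poll (names illustrative):
 *
 *	timeout = (local_clock() >> 10) + TIMEOUT_US;
 *	do {
 *		val = read_reg();
 *	} while (!(val & VALID_BIT) &&
 *		 !time_after32(local_clock() >> 10, timeout));
 */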
2967 gen11_other_irq_handler(struct drm_i915_private * const i915,
2968 const u8 instance, const u16 iir)
2970 if (instance == OTHER_GTPM_INSTANCE)
2971 return gen6_rps_irq_handler(i915, iir);
2973 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
2974 instance, iir);
2978 gen11_engine_irq_handler(struct drm_i915_private * const i915,
2979 const u8 class, const u8 instance, const u16 iir)
2981 struct intel_engine_cs *engine;
2983 if (instance <= MAX_ENGINE_INSTANCE)
2984 engine = i915->engine_class[class][instance];
2985 else
2986 engine = NULL;
2988 if (likely(engine))
2989 return gen8_cs_irq_handler(engine, iir);
2991 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
2992 class, instance);
2996 gen11_gt_identity_handler(struct drm_i915_private * const i915,
2999 const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3000 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3001 const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3003 if (unlikely(!intr))
3004 return;
3006 if (class <= COPY_ENGINE_CLASS)
3007 return gen11_engine_irq_handler(i915, class, instance, intr);
3009 if (class == OTHER_CLASS)
3010 return gen11_other_irq_handler(i915, instance, intr);
3012 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3013 class, instance, intr);
3017 gen11_gt_bank_handler(struct drm_i915_private * const i915,
3018 const unsigned int bank)
3020 void __iomem * const regs = i915->regs;
3021 unsigned long intr_dw;
3024 lockdep_assert_held(&i915->irq_lock);
3026 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3028 if (unlikely(!intr_dw)) {
3029 DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
3030 return;
3033 for_each_set_bit(bit, &intr_dw, 32) {
3034 const u32 ident = gen11_gt_engine_identity(i915,
3035 bank, bit);
3037 gen11_gt_identity_handler(i915, ident);
3040 /* Clear must be after shared has been served for engine */
3041 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3045 gen11_gt_irq_handler(struct drm_i915_private * const i915,
3046 const u32 master_ctl)
3050 spin_lock(&i915->irq_lock);
3052 for (bank = 0; bank < 2; bank++) {
3053 if (master_ctl & GEN11_GT_DW_IRQ(bank))
3054 gen11_gt_bank_handler(i915, bank);
3057 spin_unlock(&i915->irq_lock);
3061 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3063 void __iomem * const regs = dev_priv->regs;
3064 u32 iir;
3066 if (!(master_ctl & GEN11_GU_MISC_IRQ))
3067 return 0;
3069 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3071 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3073 return iir;
3077 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3079 if (iir & GEN11_GU_MISC_GSE)
3080 intel_opregion_asle_intr(dev_priv);
3083 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
3085 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3088 * Now with master disabled, get a sample of level indications
3089 * for this interrupt. Indications will be cleared on related acks.
3090 * New indications can and will light up during processing,
3091 * and will generate new interrupt after enabling master.
3093 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3096 static inline void gen11_master_intr_enable(void __iomem * const regs)
3098 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
3101 static irqreturn_t gen11_irq_handler(int irq, void *arg)
3103 struct drm_i915_private * const i915 = to_i915(arg);
3104 void __iomem * const regs = i915->regs;
3105 u32 master_ctl;
3106 u32 gu_misc_iir;
3108 if (!intel_irqs_enabled(i915))
3109 return IRQ_NONE;
3111 master_ctl = gen11_master_intr_disable(regs);
3112 if (!master_ctl) {
3113 gen11_master_intr_enable(regs);
3114 return IRQ_NONE;
3115 }
3117 /* Find, clear, then process each source of interrupt. */
3118 gen11_gt_irq_handler(i915, master_ctl);
3120 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3121 if (master_ctl & GEN11_DISPLAY_IRQ) {
3122 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3124 disable_rpm_wakeref_asserts(i915);
3126 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3127 * for the display related bits.
3129 gen8_de_irq_handler(i915, disp_ctl);
3130 enable_rpm_wakeref_asserts(i915);
3133 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3135 gen11_master_intr_enable(regs);
3137 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3139 return IRQ_HANDLED;
3142 /* Called from drm generic code, passed 'crtc' which
3143 * we use as a pipe index
3145 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
3147 struct drm_i915_private *dev_priv = to_i915(dev);
3148 unsigned long irqflags;
3150 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3151 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3152 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3154 return 0;
3157 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3159 struct drm_i915_private *dev_priv = to_i915(dev);
3160 unsigned long irqflags;
3162 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3163 i915_enable_pipestat(dev_priv, pipe,
3164 PIPE_START_VBLANK_INTERRUPT_STATUS);
3165 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3167 return 0;
3170 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3172 struct drm_i915_private *dev_priv = to_i915(dev);
3173 unsigned long irqflags;
3174 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3175 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3177 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3178 ilk_enable_display_irq(dev_priv, bit);
3179 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3181 /* Even though there is no DMC, frame counter can get stuck when
3182 * PSR is active as no frames are generated.
3184 if (HAS_PSR(dev_priv))
3185 drm_vblank_restore(dev, pipe);
3187 return 0;
3190 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3192 struct drm_i915_private *dev_priv = to_i915(dev);
3193 unsigned long irqflags;
3195 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3196 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3197 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3199 /* Even if there is no DMC, frame counter can get stuck when
3200 * PSR is active as no frames are generated, so check only for PSR.
3202 if (HAS_PSR(dev_priv))
3203 drm_vblank_restore(dev, pipe);
3205 return 0;
3208 /* Called from drm generic code, passed 'crtc' which
3209 * we use as a pipe index
3211 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3213 struct drm_i915_private *dev_priv = to_i915(dev);
3214 unsigned long irqflags;
3216 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3217 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3218 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3221 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
3223 struct drm_i915_private *dev_priv = to_i915(dev);
3224 unsigned long irqflags;
3226 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3227 i915_disable_pipestat(dev_priv, pipe,
3228 PIPE_START_VBLANK_INTERRUPT_STATUS);
3229 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3232 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3234 struct drm_i915_private *dev_priv = to_i915(dev);
3235 unsigned long irqflags;
3236 u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3237 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3239 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3240 ilk_disable_display_irq(dev_priv, bit);
3241 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3244 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3246 struct drm_i915_private *dev_priv = to_i915(dev);
3247 unsigned long irqflags;
3249 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3250 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3251 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3254 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3256 if (HAS_PCH_NOP(dev_priv))
3257 return;
3259 GEN3_IRQ_RESET(SDE);
3261 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3262 I915_WRITE(SERR_INT, 0xffffffff);
3266 * SDEIER is also touched by the interrupt handler to work around missed PCH
3267 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3268 * instead we unconditionally enable all PCH interrupt sources here, but then
3269 * only unmask them as needed with SDEIMR.
3271 * This function needs to be called before interrupts are enabled.
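/*
 * Concretely, the expected ordering is (sketch of the ironlake/gen8
 * postinstall paths below):
 *
 *	ibx_irq_pre_postinstall(dev);	// SDEIER = 0xffffffff, IRQs still off
 *	...install DE/GT interrupt state...
 *	ibx_irq_postinstall(dev);	// unmask wanted sources via SDEIMR
 */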
3273 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3275 struct drm_i915_private *dev_priv = to_i915(dev);
3277 if (HAS_PCH_NOP(dev_priv))
3278 return;
3280 WARN_ON(I915_READ(SDEIER) != 0);
3281 I915_WRITE(SDEIER, 0xffffffff);
3282 POSTING_READ(SDEIER);
3285 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3287 GEN3_IRQ_RESET(GT);
3288 if (INTEL_GEN(dev_priv) >= 6)
3289 GEN3_IRQ_RESET(GEN6_PM);
3292 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3294 if (IS_CHERRYVIEW(dev_priv))
3295 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3297 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3299 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3300 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3302 i9xx_pipestat_irq_reset(dev_priv);
3304 GEN3_IRQ_RESET(VLV_);
3305 dev_priv->irq_mask = ~0u;
3308 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3310 u32 pipestat_mask;
3311 u32 enable_mask;
3312 enum pipe pipe;
3314 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3316 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3317 for_each_pipe(dev_priv, pipe)
3318 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3320 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3321 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3322 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3323 I915_LPE_PIPE_A_INTERRUPT |
3324 I915_LPE_PIPE_B_INTERRUPT;
3326 if (IS_CHERRYVIEW(dev_priv))
3327 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3328 I915_LPE_PIPE_C_INTERRUPT;
3330 WARN_ON(dev_priv->irq_mask != ~0u);
3332 dev_priv->irq_mask = ~enable_mask;
3334 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
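/*
 * IMR bits mask (disable) interrupt sources while IER bits enable them,
 * so initializing IMR to the bitwise complement of the enable mask
 * unmasks exactly the enabled sources. E.g. if enable_mask were
 * 0x0000c030, then irq_mask = ~enable_mask = 0xffff3fcf leaves only those
 * four bits unmasked (values illustrative, not from the register spec).
 */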
3339 static void ironlake_irq_reset(struct drm_device *dev)
3341 struct drm_i915_private *dev_priv = to_i915(dev);
3343 GEN3_IRQ_RESET(DE);
3344 if (IS_GEN(dev_priv, 7))
3345 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3347 if (IS_HASWELL(dev_priv)) {
3348 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3349 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3352 gen5_gt_irq_reset(dev_priv);
3354 ibx_irq_reset(dev_priv);
3357 static void valleyview_irq_reset(struct drm_device *dev)
3359 struct drm_i915_private *dev_priv = to_i915(dev);
3361 I915_WRITE(VLV_MASTER_IER, 0);
3362 POSTING_READ(VLV_MASTER_IER);
3364 gen5_gt_irq_reset(dev_priv);
3366 spin_lock_irq(&dev_priv->irq_lock);
3367 if (dev_priv->display_irqs_enabled)
3368 vlv_display_irq_reset(dev_priv);
3369 spin_unlock_irq(&dev_priv->irq_lock);
3372 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3374 GEN8_IRQ_RESET_NDX(GT, 0);
3375 GEN8_IRQ_RESET_NDX(GT, 1);
3376 GEN8_IRQ_RESET_NDX(GT, 2);
3377 GEN8_IRQ_RESET_NDX(GT, 3);
3380 static void gen8_irq_reset(struct drm_device *dev)
3382 struct drm_i915_private *dev_priv = to_i915(dev);
3385 gen8_master_intr_disable(dev_priv->regs);
3387 gen8_gt_irq_reset(dev_priv);
3389 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3390 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3392 for_each_pipe(dev_priv, pipe)
3393 if (intel_display_power_is_enabled(dev_priv,
3394 POWER_DOMAIN_PIPE(pipe)))
3395 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3397 GEN3_IRQ_RESET(GEN8_DE_PORT_);
3398 GEN3_IRQ_RESET(GEN8_DE_MISC_);
3399 GEN3_IRQ_RESET(GEN8_PCU_);
3401 if (HAS_PCH_SPLIT(dev_priv))
3402 ibx_irq_reset(dev_priv);
3405 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
3407 /* Disable RCS, BCS, VCS and VECS class engines. */
3408 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
3409 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);
3411 /* Mask all irqs on the RCS, BCS, VCS and VECS engines. */
3412 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
3413 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
3414 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
3415 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
3416 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
3418 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3419 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
3422 static void gen11_irq_reset(struct drm_device *dev)
3424 struct drm_i915_private *dev_priv = dev->dev_private;
3427 gen11_master_intr_disable(dev_priv->regs);
3429 gen11_gt_irq_reset(dev_priv);
3431 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3433 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3434 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3436 for_each_pipe(dev_priv, pipe)
3437 if (intel_display_power_is_enabled(dev_priv,
3438 POWER_DOMAIN_PIPE(pipe)))
3439 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3441 GEN3_IRQ_RESET(GEN8_DE_PORT_);
3442 GEN3_IRQ_RESET(GEN8_DE_MISC_);
3443 GEN3_IRQ_RESET(GEN11_DE_HPD_);
3444 GEN3_IRQ_RESET(GEN11_GU_MISC_);
3445 GEN3_IRQ_RESET(GEN8_PCU_);
3447 if (HAS_PCH_ICP(dev_priv))
3448 GEN3_IRQ_RESET(SDE);
3451 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3454 u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3457 spin_lock_irq(&dev_priv->irq_lock);
3459 if (!intel_irqs_enabled(dev_priv)) {
3460 spin_unlock_irq(&dev_priv->irq_lock);
3464 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3465 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3466 dev_priv->de_irq_mask[pipe],
3467 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3469 spin_unlock_irq(&dev_priv->irq_lock);
3472 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3477 spin_lock_irq(&dev_priv->irq_lock);
3479 if (!intel_irqs_enabled(dev_priv)) {
3480 spin_unlock_irq(&dev_priv->irq_lock);
3484 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3485 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3487 spin_unlock_irq(&dev_priv->irq_lock);
3489 /* make sure we're done processing display irqs */
3490 synchronize_irq(dev_priv->drm.irq);
3493 static void cherryview_irq_reset(struct drm_device *dev)
3495 struct drm_i915_private *dev_priv = to_i915(dev);
3497 I915_WRITE(GEN8_MASTER_IRQ, 0);
3498 POSTING_READ(GEN8_MASTER_IRQ);
3500 gen8_gt_irq_reset(dev_priv);
3502 GEN3_IRQ_RESET(GEN8_PCU_);
3504 spin_lock_irq(&dev_priv->irq_lock);
3505 if (dev_priv->display_irqs_enabled)
3506 vlv_display_irq_reset(dev_priv);
3507 spin_unlock_irq(&dev_priv->irq_lock);
3510 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3511 const u32 hpd[HPD_NUM_PINS])
3513 struct intel_encoder *encoder;
3514 u32 enabled_irqs = 0;
3516 for_each_intel_encoder(&dev_priv->drm, encoder)
3517 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3518 enabled_irqs |= hpd[encoder->hpd_pin];
3520 return enabled_irqs;
3523 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3525 u32 hotplug;
3527 /*
3528 * Enable digital hotplug on the PCH, and configure the DP short pulse
3529 * duration to 2ms (which is the minimum in the Display Port spec).
3530 * The pulse duration bits are reserved on LPT+.
3532 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3533 hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3534 PORTC_PULSE_DURATION_MASK |
3535 PORTD_PULSE_DURATION_MASK);
3536 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3537 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3538 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3540 * When CPU and PCH are on the same package, port A
3541 * HPD must be enabled in both north and south.
3543 if (HAS_PCH_LPT_LP(dev_priv))
3544 hotplug |= PORTA_HOTPLUG_ENABLE;
3545 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3548 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3550 u32 hotplug_irqs, enabled_irqs;
3552 if (HAS_PCH_IBX(dev_priv)) {
3553 hotplug_irqs = SDE_HOTPLUG_MASK;
3554 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3555 } else {
3556 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3557 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3560 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3562 ibx_hpd_detection_setup(dev_priv);
3565 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
3567 u32 hotplug;
3569 hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3570 hotplug |= ICP_DDIA_HPD_ENABLE |
3571 ICP_DDIB_HPD_ENABLE;
3572 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3574 hotplug = I915_READ(SHOTPLUG_CTL_TC);
3575 hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
3576 ICP_TC_HPD_ENABLE(PORT_TC2) |
3577 ICP_TC_HPD_ENABLE(PORT_TC3) |
3578 ICP_TC_HPD_ENABLE(PORT_TC4);
3579 I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3582 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3584 u32 hotplug_irqs, enabled_irqs;
3586 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3587 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3589 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3591 icp_hpd_detection_setup(dev_priv);
3594 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3596 u32 hotplug;
3598 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3599 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3600 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3601 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3602 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3603 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3605 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3606 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3607 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3608 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3609 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3610 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3613 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3615 u32 hotplug_irqs, enabled_irqs;
3616 u32 val;
3618 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3619 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3621 val = I915_READ(GEN11_DE_HPD_IMR);
3622 val &= ~hotplug_irqs;
3623 I915_WRITE(GEN11_DE_HPD_IMR, val);
3624 POSTING_READ(GEN11_DE_HPD_IMR);
3626 gen11_hpd_detection_setup(dev_priv);
3628 if (HAS_PCH_ICP(dev_priv))
3629 icp_hpd_irq_setup(dev_priv);
3632 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3634 u32 val, hotplug;
3636 /* Display WA #1179 WaHardHangonHotPlug: cnp */
3637 if (HAS_PCH_CNP(dev_priv)) {
3638 val = I915_READ(SOUTH_CHICKEN1);
3639 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3640 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3641 I915_WRITE(SOUTH_CHICKEN1, val);
3644 /* Enable digital hotplug on the PCH */
3645 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3646 hotplug |= PORTA_HOTPLUG_ENABLE |
3647 PORTB_HOTPLUG_ENABLE |
3648 PORTC_HOTPLUG_ENABLE |
3649 PORTD_HOTPLUG_ENABLE;
3650 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3652 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3653 hotplug |= PORTE_HOTPLUG_ENABLE;
3654 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3657 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3659 u32 hotplug_irqs, enabled_irqs;
3661 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3662 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3664 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3666 spt_hpd_detection_setup(dev_priv);
3669 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3671 u32 hotplug;
3673 /*
3674 * Enable digital hotplug on the CPU, and configure the DP short pulse
3675 * duration to 2ms (which is the minimum in the Display Port spec)
3676 * The pulse duration bits are reserved on HSW+.
3678 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3679 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3680 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3681 DIGITAL_PORTA_PULSE_DURATION_2ms;
3682 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3685 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3687 u32 hotplug_irqs, enabled_irqs;
3689 if (INTEL_GEN(dev_priv) >= 8) {
3690 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3691 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3693 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3694 } else if (INTEL_GEN(dev_priv) >= 7) {
3695 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3696 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3698 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3699 } else {
3700 hotplug_irqs = DE_DP_A_HOTPLUG;
3701 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3703 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3706 ilk_hpd_detection_setup(dev_priv);
3708 ibx_hpd_irq_setup(dev_priv);
3711 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3712 u32 enabled_irqs)
3714 u32 hotplug;
3716 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3717 hotplug |= PORTA_HOTPLUG_ENABLE |
3718 PORTB_HOTPLUG_ENABLE |
3719 PORTC_HOTPLUG_ENABLE;
3721 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3722 hotplug, enabled_irqs);
3723 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3726 * On BXT the invert bit has to be set based on the AOB design
3727 * for the HPD detection logic; update it based on VBT fields.
3729 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3730 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3731 hotplug |= BXT_DDIA_HPD_INVERT;
3732 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3733 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3734 hotplug |= BXT_DDIB_HPD_INVERT;
3735 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3736 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3737 hotplug |= BXT_DDIC_HPD_INVERT;
3739 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3742 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3744 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3747 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3749 u32 hotplug_irqs, enabled_irqs;
3751 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3752 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3754 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3756 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3759 static void ibx_irq_postinstall(struct drm_device *dev)
3761 struct drm_i915_private *dev_priv = to_i915(dev);
3762 u32 mask;
3764 if (HAS_PCH_NOP(dev_priv))
3765 return;
3767 if (HAS_PCH_IBX(dev_priv))
3768 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3769 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3770 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3771 else
3772 mask = SDE_GMBUS_CPT;
3774 gen3_assert_iir_is_zero(dev_priv, SDEIIR);
3775 I915_WRITE(SDEIMR, ~mask);
3777 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3778 HAS_PCH_LPT(dev_priv))
3779 ibx_hpd_detection_setup(dev_priv);
3780 else
3781 spt_hpd_detection_setup(dev_priv);
3784 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3786 struct drm_i915_private *dev_priv = to_i915(dev);
3787 u32 pm_irqs, gt_irqs;
3789 pm_irqs = gt_irqs = 0;
3791 dev_priv->gt_irq_mask = ~0;
3792 if (HAS_L3_DPF(dev_priv)) {
3793 /* L3 parity interrupt is always unmasked. */
3794 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3795 gt_irqs |= GT_PARITY_ERROR(dev_priv);
3798 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3799 if (IS_GEN(dev_priv, 5)) {
3800 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3801 } else {
3802 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3805 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3807 if (INTEL_GEN(dev_priv) >= 6) {
3809 * RPS interrupts will get enabled/disabled on demand when RPS
3810 * itself is enabled/disabled.
3812 if (HAS_VEBOX(dev_priv)) {
3813 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3814 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3817 dev_priv->pm_imr = 0xffffffff;
3818 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
3822 static int ironlake_irq_postinstall(struct drm_device *dev)
3824 struct drm_i915_private *dev_priv = to_i915(dev);
3825 u32 display_mask, extra_mask;
3827 if (INTEL_GEN(dev_priv) >= 7) {
3828 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3829 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3830 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3831 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3832 DE_DP_A_HOTPLUG_IVB);
3834 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3835 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3836 DE_PIPEA_CRC_DONE | DE_POISON);
3837 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3838 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3839 DE_DP_A_HOTPLUG);
3842 if (IS_HASWELL(dev_priv)) {
3843 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
3844 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
3845 display_mask |= DE_EDP_PSR_INT_HSW;
3848 dev_priv->irq_mask = ~display_mask;
3850 ibx_irq_pre_postinstall(dev);
3852 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3854 gen5_gt_irq_postinstall(dev);
3856 ilk_hpd_detection_setup(dev_priv);
3858 ibx_irq_postinstall(dev);
3860 if (IS_IRONLAKE_M(dev_priv)) {
3861 /* Enable PCU event interrupts
3863 * spinlocking not required here for correctness since interrupt
3864 * setup is guaranteed to run in single-threaded context. But we
3865 * need it to make the assert_spin_locked happy. */
3866 spin_lock_irq(&dev_priv->irq_lock);
3867 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3868 spin_unlock_irq(&dev_priv->irq_lock);
3871 return 0;
3874 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3876 lockdep_assert_held(&dev_priv->irq_lock);
3878 if (dev_priv->display_irqs_enabled)
3879 return;
3881 dev_priv->display_irqs_enabled = true;
3883 if (intel_irqs_enabled(dev_priv)) {
3884 vlv_display_irq_reset(dev_priv);
3885 vlv_display_irq_postinstall(dev_priv);
3889 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3891 lockdep_assert_held(&dev_priv->irq_lock);
3893 if (!dev_priv->display_irqs_enabled)
3894 return;
3896 dev_priv->display_irqs_enabled = false;
3898 if (intel_irqs_enabled(dev_priv))
3899 vlv_display_irq_reset(dev_priv);
3903 static int valleyview_irq_postinstall(struct drm_device *dev)
3905 struct drm_i915_private *dev_priv = to_i915(dev);
3907 gen5_gt_irq_postinstall(dev);
3909 spin_lock_irq(&dev_priv->irq_lock);
3910 if (dev_priv->display_irqs_enabled)
3911 vlv_display_irq_postinstall(dev_priv);
3912 spin_unlock_irq(&dev_priv->irq_lock);
3914 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3915 POSTING_READ(VLV_MASTER_IER);
3917 return 0;
3920 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3922 /* These are interrupts we'll toggle with the ring mask register */
3923 u32 gt_interrupts[] = {
3924 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3925 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3926 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3927 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3928 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3929 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3930 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3931 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3932 0,
3933 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3934 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3937 dev_priv->pm_ier = 0x0;
3938 dev_priv->pm_imr = ~dev_priv->pm_ier;
3939 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3940 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3942 * RPS interrupts will get enabled/disabled on demand when RPS itself
3943 * is enabled/disabled. Same will be the case for GuC interrupts.
3945 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
3946 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
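/*
 * Each 32-bit GT IMR/IER register carries the 16-bit interrupt fields of
 * two engines, selected by the GEN8_*_IRQ_SHIFT values, which is why each
 * array entry above ORs two shifted copies of the same event bits into
 * one word. Sketch (shift names illustrative; the real ones live in
 * i915_reg.h):
 *
 *	entry = events << ENGINE_A_SHIFT | events << ENGINE_B_SHIFT;
 *	GEN8_IRQ_INIT_NDX(GT, n, ~entry, entry);
 */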
3949 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3951 u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3952 u32 de_pipe_enables;
3953 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3954 u32 de_port_enables;
3955 u32 de_misc_masked = GEN8_DE_EDP_PSR;
3956 enum pipe pipe;
3958 if (INTEL_GEN(dev_priv) <= 10)
3959 de_misc_masked |= GEN8_DE_MISC_GSE;
3961 if (INTEL_GEN(dev_priv) >= 9) {
3962 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3963 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3964 GEN9_AUX_CHANNEL_D;
3965 if (IS_GEN9_LP(dev_priv))
3966 de_port_masked |= BXT_DE_PORT_GMBUS;
3967 } else {
3968 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3971 if (INTEL_GEN(dev_priv) >= 11)
3972 de_port_masked |= ICL_AUX_CHANNEL_E;
3974 if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
3975 de_port_masked |= CNL_AUX_CHANNEL_F;
3977 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3978 GEN8_PIPE_FIFO_UNDERRUN;
3980 de_port_enables = de_port_masked;
3981 if (IS_GEN9_LP(dev_priv))
3982 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3983 else if (IS_BROADWELL(dev_priv))
3984 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3986 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
3987 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
3989 for_each_pipe(dev_priv, pipe) {
3990 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3992 if (intel_display_power_is_enabled(dev_priv,
3993 POWER_DOMAIN_PIPE(pipe)))
3994 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3995 dev_priv->de_irq_mask[pipe],
3996 de_pipe_enables);
3999 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4000 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4002 if (INTEL_GEN(dev_priv) >= 11) {
4003 u32 de_hpd_masked = 0;
4004 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4005 GEN11_DE_TBT_HOTPLUG_MASK;
4007 GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
4008 gen11_hpd_detection_setup(dev_priv);
4009 } else if (IS_GEN9_LP(dev_priv)) {
4010 bxt_hpd_detection_setup(dev_priv);
4011 } else if (IS_BROADWELL(dev_priv)) {
4012 ilk_hpd_detection_setup(dev_priv);
4016 static int gen8_irq_postinstall(struct drm_device *dev)
4018 struct drm_i915_private *dev_priv = to_i915(dev);
4020 if (HAS_PCH_SPLIT(dev_priv))
4021 ibx_irq_pre_postinstall(dev);
4023 gen8_gt_irq_postinstall(dev_priv);
4024 gen8_de_irq_postinstall(dev_priv);
4026 if (HAS_PCH_SPLIT(dev_priv))
4027 ibx_irq_postinstall(dev);
4029 gen8_master_intr_enable(dev_priv->regs);
4031 return 0;
4034 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4036 const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4038 BUILD_BUG_ON(irqs & 0xffff0000);
4040 /* Enable RCS, BCS, VCS and VECS class interrupts. */
4041 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
4042 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);
4044 /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4045 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
4046 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
4047 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
4048 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
4049 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
4052 * RPS interrupts will get enabled/disabled on demand when RPS itself
4053 * is enabled/disabled.
4055 dev_priv->pm_ier = 0x0;
4056 dev_priv->pm_imr = ~dev_priv->pm_ier;
4057 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4058 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
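/*
 * The BUILD_BUG_ON above guarantees that "irqs" fits in 16 bits: each
 * GEN11_*_INTR_ENABLE register packs the enable fields of two engine
 * groups into one 32-bit word, so "irqs << 16 | irqs" enables the same
 * events in both halves. E.g. if irqs were 0x0101, the value written
 * would be 0x01010101 (illustrative).
 */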
4061 static void icp_irq_postinstall(struct drm_device *dev)
4063 struct drm_i915_private *dev_priv = to_i915(dev);
4064 u32 mask = SDE_GMBUS_ICP;
4066 WARN_ON(I915_READ(SDEIER) != 0);
4067 I915_WRITE(SDEIER, 0xffffffff);
4068 POSTING_READ(SDEIER);
4070 gen3_assert_iir_is_zero(dev_priv, SDEIIR);
4071 I915_WRITE(SDEIMR, ~mask);
4073 icp_hpd_detection_setup(dev_priv);
4076 static int gen11_irq_postinstall(struct drm_device *dev)
4078 struct drm_i915_private *dev_priv = dev->dev_private;
4079 u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4081 if (HAS_PCH_ICP(dev_priv))
4082 icp_irq_postinstall(dev);
4084 gen11_gt_irq_postinstall(dev_priv);
4085 gen8_de_irq_postinstall(dev_priv);
4087 GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4089 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4091 gen11_master_intr_enable(dev_priv->regs);
4092 POSTING_READ(GEN11_GFX_MSTR_IRQ);
4094 return 0;
4097 static int cherryview_irq_postinstall(struct drm_device *dev)
4099 struct drm_i915_private *dev_priv = to_i915(dev);
4101 gen8_gt_irq_postinstall(dev_priv);
4103 spin_lock_irq(&dev_priv->irq_lock);
4104 if (dev_priv->display_irqs_enabled)
4105 vlv_display_irq_postinstall(dev_priv);
4106 spin_unlock_irq(&dev_priv->irq_lock);
4108 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4109 POSTING_READ(GEN8_MASTER_IRQ);
4111 return 0;
4114 static void i8xx_irq_reset(struct drm_device *dev)
4116 struct drm_i915_private *dev_priv = to_i915(dev);
4118 i9xx_pipestat_irq_reset(dev_priv);
4120 GEN2_IRQ_RESET();
4123 static int i8xx_irq_postinstall(struct drm_device *dev)
4125 struct drm_i915_private *dev_priv = to_i915(dev);
4126 u16 enable_mask;
4128 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4129 I915_ERROR_MEMORY_REFRESH));
4131 /* Unmask the interrupts that we always want on. */
4132 dev_priv->irq_mask =
4133 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4134 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4135 I915_MASTER_ERROR_INTERRUPT);
4137 enable_mask =
4138 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4139 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4140 I915_MASTER_ERROR_INTERRUPT |
4141 I915_USER_INTERRUPT;
4143 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4145 /* Interrupt setup is already guaranteed to be single-threaded, this is
4146 * just to make the assert_spin_locked check happy. */
4147 spin_lock_irq(&dev_priv->irq_lock);
4148 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4149 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4150 spin_unlock_irq(&dev_priv->irq_lock);
4152 return 0;
4155 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
4156 u16 *eir, u16 *eir_stuck)
4158 u16 emr;
4160 *eir = I915_READ16(EIR);
4163 I915_WRITE16(EIR, *eir);
4165 *eir_stuck = I915_READ16(EIR);
4166 if (*eir_stuck == 0)
4167 return;
4170 * Toggle all EMR bits to make sure we get an edge
4171 * in the ISR master error bit if we don't clear
4172 * all the EIR bits. Otherwise the edge triggered
4173 * IIR on i965/g4x wouldn't notice that an interrupt
4174 * is still pending. Also some EIR bits can't be
4175 * cleared except by handling the underlying error
4176 * (or by a GPU reset) so we mask any bit that
4177 * remains set.
4179 emr = I915_READ16(EMR);
4180 I915_WRITE16(EMR, 0xffff);
4181 I915_WRITE16(EMR, emr | *eir_stuck);
4184 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
4185 u16 eir, u16 eir_stuck)
4187 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4189 if (eir_stuck)
4190 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
4193 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4194 u32 *eir, u32 *eir_stuck)
4196 u32 emr;
4198 *eir = I915_READ(EIR);
4200 I915_WRITE(EIR, *eir);
4202 *eir_stuck = I915_READ(EIR);
4203 if (*eir_stuck == 0)
4204 return;
4207 * Toggle all EMR bits to make sure we get an edge
4208 * in the ISR master error bit if we don't clear
4209 * all the EIR bits. Otherwise the edge triggered
4210 * IIR on i965/g4x wouldn't notice that an interrupt
4211 * is still pending. Also some EIR bits can't be
4212 * cleared except by handling the underlying error
4213 * (or by a GPU reset) so we mask any bit that
4214 * remains set.
4216 emr = I915_READ(EMR);
4217 I915_WRITE(EMR, 0xffffffff);
4218 I915_WRITE(EMR, emr | *eir_stuck);
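/*
 * The EMR dance above is what regenerates the edge: writing all-ones
 * masks every error source and forces the ISR master error bit to 0;
 * writing back the old mask plus the stuck EIR bits then lets the bit
 * return to 1 only for errors we can actually clear. Condensed:
 *
 *	emr = I915_READ(EMR);
 *	I915_WRITE(EMR, 0xffffffff);		// mask all: level -> 0
 *	I915_WRITE(EMR, emr | *eir_stuck);	// unmask, keep stuck bits masked
 */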
4221 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4222 u32 eir, u32 eir_stuck)
4224 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
4226 if (eir_stuck)
4227 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
4230 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4232 struct drm_device *dev = arg;
4233 struct drm_i915_private *dev_priv = to_i915(dev);
4234 irqreturn_t ret = IRQ_NONE;
4236 if (!intel_irqs_enabled(dev_priv))
4237 return IRQ_NONE;
4239 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4240 disable_rpm_wakeref_asserts(dev_priv);
4242 do {
4243 u32 pipe_stats[I915_MAX_PIPES] = {};
4244 u16 eir = 0, eir_stuck = 0;
4245 u16 iir;
4247 iir = I915_READ16(IIR);
4248 if (iir == 0)
4249 break;
4251 ret = IRQ_HANDLED;
4253 /* Call regardless, as some status bits might not be
4254 * signalled in iir */
4255 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4257 if (iir & I915_MASTER_ERROR_INTERRUPT)
4258 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4260 I915_WRITE16(IIR, iir);
4262 if (iir & I915_USER_INTERRUPT)
4263 notify_ring(dev_priv->engine[RCS]);
4265 if (iir & I915_MASTER_ERROR_INTERRUPT)
4266 i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4268 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4271 enable_rpm_wakeref_asserts(dev_priv);
4273 return ret;
static void i915_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (I915_HAS_HOTPLUG(dev_priv)) {
                i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }

        i9xx_pipestat_irq_reset(dev_priv);
        GEN3_IRQ_RESET();
}
static int i915_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;

        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
                          I915_ERROR_MEMORY_REFRESH));

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (I915_HAS_HOTPLUG(dev_priv)) {
                /* Enable in IER... */
                enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
                /* and unmask in IMR */
                dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
        }

        GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);

        return 0;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = I915_READ(IIR);
                if (iir == 0)
                        break;
                ret = IRQ_HANDLED;

                if (I915_HAS_HOTPLUG(dev_priv) &&
                    iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                I915_WRITE(IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}
static void i965_irq_reset(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        i9xx_pipestat_irq_reset(dev_priv);
        GEN3_IRQ_RESET();
}
static int i965_irq_postinstall(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;
        u32 error_mask;

        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         */
        if (IS_G4X(dev_priv)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
                               I915_ERROR_MEMORY_REFRESH);
        } else {
                error_mask = ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH);
        }
        I915_WRITE(EMR, error_mask);

        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PORT_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                  I915_MASTER_ERROR_INTERRUPT);

        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
                I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;

        if (IS_G4X(dev_priv))
                enable_mask |= I915_BSD_USER_INTERRUPT;

        GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

        /* Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked check happy. */
        spin_lock_irq(&dev_priv->irq_lock);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
        spin_unlock_irq(&dev_priv->irq_lock);

        i915_enable_asle_pipestat(dev_priv);

        return 0;
}
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
        u32 hotplug_en;

        lockdep_assert_held(&dev_priv->irq_lock);

        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
        hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
        /*
         * Programming the CRT detection parameters tends to generate a
         * spurious hotplug event about three seconds later. So just do
         * it once.
         */
        if (IS_G4X(dev_priv))
                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

        /* Ignore TV since it's buggy */
        i915_hotplug_interrupt_update_locked(dev_priv,
                                             HOTPLUG_INT_EN_MASK |
                                             CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
                                             CRT_HOTPLUG_ACTIVATION_PERIOD_64,
                                             hotplug_en);
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;

        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;

        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);

        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
                u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;

                iir = I915_READ(IIR);
                if (iir == 0)
                        break;
                ret = IRQ_HANDLED;

                if (iir & I915_DISPLAY_PORT_INTERRUPT)
                        hotplug_status = i9xx_hpd_irq_ack(dev_priv);

                /* Call regardless, as some status bits might not be
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

                I915_WRITE(IIR, iir);

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);

                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[VCS]);

                if (iir & I915_MASTER_ERROR_INTERRUPT)
                        i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);

                i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        int i;

        intel_hpd_init_work(dev_priv);

        INIT_WORK(&rps->work, gen6_pm_rps_work);

        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
                dev_priv->l3_parity.remap_info[i] = NULL;
        if (HAS_GUC_SCHED(dev_priv))
                dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

        /* Let's track the enabled rps events */
        if (IS_VALLEYVIEW(dev_priv))
                /* WaGsvRC0ResidencyMethod:vlv */
                dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
        else
                dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
                                           GEN6_PM_RP_DOWN_THRESHOLD |
                                           GEN6_PM_RP_DOWN_TIMEOUT);

        rps->pm_intrmsk_mbz = 0;

        /*
         * SNB,IVB,HSW can, while VLV,CHV may, hard hang on a looping
         * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
         *
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_GEN(dev_priv) <= 7)
                rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

        if (INTEL_GEN(dev_priv) >= 8)
                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
        if (IS_GEN(dev_priv, 2)) {
                /* Gen2 doesn't have a hardware frame counter */
                dev->max_vblank_count = 0;
        } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = g4x_get_vblank_counter;
        } else {
                dev->driver->get_vblank_counter = i915_get_vblank_counter;
                dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        }

        /*
         * Opt out of the vblank disable timer on everything except gen2.
         * Gen2 doesn't have a hardware frame counter and so depends on
         * vblank interrupts to produce sane vblank sequence numbers.
         */
        if (!IS_GEN(dev_priv, 2))
                dev->vblank_disable_immediate = true;
        /* Most platforms treat the display irq block as an always-on
         * power domain. vlv/chv can disable it at runtime and need
         * special care to avoid writing any of the display block registers
         * outside of the power domain. We defer setting up the display irqs
         * in this case to the runtime pm.
         */
        dev_priv->display_irqs_enabled = true;
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                dev_priv->display_irqs_enabled = false;

        dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
        /* If we have MST support, we want to avoid doing short HPD IRQ storm
         * detection, as short HPD storms will occur as a natural part of
         * sideband messaging with MST.
         * On older platforms however, IRQ storms can occur with both long and
         * short pulses, as seen on some G4x systems.
         */
        dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
        dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

        if (IS_CHERRYVIEW(dev_priv)) {
                dev->driver->irq_handler = cherryview_irq_handler;
                dev->driver->irq_preinstall = cherryview_irq_reset;
                dev->driver->irq_postinstall = cherryview_irq_postinstall;
                dev->driver->irq_uninstall = cherryview_irq_reset;
                dev->driver->enable_vblank = i965_enable_vblank;
                dev->driver->disable_vblank = i965_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_reset;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
                dev->driver->irq_uninstall = valleyview_irq_reset;
                dev->driver->enable_vblank = i965_enable_vblank;
                dev->driver->disable_vblank = i965_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (INTEL_GEN(dev_priv) >= 11) {
                dev->driver->irq_handler = gen11_irq_handler;
                dev->driver->irq_preinstall = gen11_irq_reset;
                dev->driver->irq_postinstall = gen11_irq_postinstall;
                dev->driver->irq_uninstall = gen11_irq_reset;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
                dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
                dev->driver->irq_postinstall = gen8_irq_postinstall;
                dev->driver->irq_uninstall = gen8_irq_reset;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
                if (IS_GEN9_LP(dev_priv))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
                else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
                         HAS_PCH_CNP(dev_priv))
                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
                else
                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_reset;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_reset;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else {
                if (IS_GEN(dev_priv, 2)) {
                        dev->driver->irq_preinstall = i8xx_irq_reset;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_reset;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
                } else if (IS_GEN(dev_priv, 3)) {
                        dev->driver->irq_preinstall = i915_irq_reset;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_reset;
                        dev->driver->irq_handler = i915_irq_handler;
                        dev->driver->enable_vblank = i8xx_enable_vblank;
                        dev->driver->disable_vblank = i8xx_disable_vblank;
                } else {
                        dev->driver->irq_preinstall = i965_irq_reset;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_reset;
                        dev->driver->irq_handler = i965_irq_handler;
                        dev->driver->enable_vblank = i965_enable_vblank;
                        dev->driver->disable_vblank = i965_disable_vblank;
                }
                if (I915_HAS_HOTPLUG(dev_priv))
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        }
}
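/*
 * Informational note (based on the legacy DRM IRQ midlayer, not on code
 * in this file): drm_irq_install() invokes ->irq_preinstall, installs
 * ->irq_handler via request_irq() and then calls ->irq_postinstall, so
 * the hooks selected above run automatically in that order at install
 * time.
 */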
/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
        int i;
        for (i = 0; i < MAX_L3_SLICES; ++i)
                kfree(i915->l3_parity.remap_info[i]);
}
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
        /*
         * We enable some interrupt sources in our postinstall hooks, so mark
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
        dev_priv->runtime_pm.irqs_enabled = true;

        return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
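/*
 * Example (an illustrative sketch, not the actual probe code): the
 * two-stage split described above leads to a load sequence roughly like
 *
 *      intel_irq_init(dev_priv);               (vtables, work items)
 *      err = intel_irq_install(dev_priv);      (hardware IRQ goes live)
 *      if (err)
 *              goto err_irq;
 *      ...
 *      intel_hpd_init(dev_priv);               (hotplug enabled last)
 *
 * with intel_irq_uninstall() and intel_irq_fini() unwinding in reverse
 * on teardown. The exact placement of intel_hpd_init() is an assumption
 * made for the example.
 */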
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
        drm_irq_uninstall(&dev_priv->drm);
        intel_hpd_cancel_work(dev_priv);
        dev_priv->runtime_pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
        dev_priv->runtime_pm.irqs_enabled = false;
        synchronize_irq(dev_priv->drm.irq);
}
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
        dev_priv->runtime_pm.irqs_enabled = true;
        dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
        dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
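/*
 * Example (an illustrative sketch of the usual runtime-pm pairing): the
 * two helpers above bracket a low-power transition,
 *
 *      intel_runtime_pm_disable_interrupts(dev_priv);
 *      ... device is powered down and later brought back ...
 *      intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * Disabling goes through ->irq_uninstall plus synchronize_irq() so no
 * handler can still be running when power is cut; enabling replays the
 * ->irq_preinstall/->irq_postinstall sequence to reprogram the
 * freshly-reset interrupt registers.
 */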