/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

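/*
 * Per-platform tables mapping each HPD pin to the hotplug detection bit(s)
 * in the corresponding hardware register.
 */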
static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
        [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
        [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
        [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
        [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
        [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
        [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
        [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
        [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
        [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
        I915_WRITE16(type##IMR, 0xffff); \
        POSTING_READ16(type##IMR); \
        I915_WRITE16(type##IER, 0); \
        I915_WRITE16(type##IIR, 0xffff); \
        POSTING_READ16(type##IIR); \
        I915_WRITE16(type##IIR, 0xffff); \
        POSTING_READ16(type##IIR); \
} while (0)
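
/*
 * As an illustrative example, GEN3_IRQ_RESET(SDE) masks everything in
 * SDEIMR, zeroes SDEIER, and then clears SDEIIR twice: per the paranoia
 * note above, a second event may have been latched behind the first, so a
 * single clear is not guaranteed to leave IIR empty.
 */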

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg)
{
        u32 val = I915_READ(reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
        I915_WRITE(reg, 0xffffffff);
        POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg)
{
        u16 val = I915_READ16(reg);

        if (val == 0)
                return;

        WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
             i915_mmio_reg_offset(reg), val);
        I915_WRITE16(reg, 0xffff);
        POSTING_READ16(reg);
        I915_WRITE16(reg, 0xffff);
        POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
        gen3_assert_iir_is_zero(dev_priv, type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
        gen2_assert_iir_is_zero(dev_priv, type##IIR); \
        I915_WRITE16(type##IER, (ier_val)); \
        I915_WRITE16(type##IMR, (imr_val)); \
        POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     uint32_t mask,
                                     uint32_t bits)
{
        uint32_t val;

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid having these read-modify-write
 * cycles interfere with one another, the bits are protected by a
 * spinlock. Since this function is usually not called from a context
 * where the lock is held already, this function acquires the lock
 * itself. A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   uint32_t mask,
                                   uint32_t bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}
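
/*
 * Hypothetical usage sketch: to enable only the CRT hotplug interrupt while
 * leaving the other HPD bits in PORT_HOTPLUG_EN untouched, a caller would do
 * something like:
 *
 *      i915_hotplug_interrupt_update(dev_priv,
 *                                    CRT_HOTPLUG_INT_EN,
 *                                    CRT_HOTPLUG_INT_EN);
 *
 * i.e. @mask selects which bits may change and @bits gives their new values.
 */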

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
                         const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
                                const unsigned int bank,
                                const unsigned int bit)
{
        void __iomem * const regs = i915->regs;
        u32 dw;

        lockdep_assert_held(&i915->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(i915, bank, bit);

                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody else.
                 */
                raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

                return true;
        }

        return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                            uint32_t interrupt_mask,
                            uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

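        /*
         * IMR bits are disable bits: an interrupt is enabled by clearing
         * its mask bit, hence the ~enabled_irq_mask below.
         */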
        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
        POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

        return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 11)
                return GEN11_GPM_WGBOXPERF_INTR_MASK;
        else if (INTEL_GEN(dev_priv) >= 8)
                return GEN8_GT_IMR(2);
        else
                return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        if (INTEL_GEN(dev_priv) >= 11)
                return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
        else if (INTEL_GEN(dev_priv) >= 8)
                return GEN8_GT_IER(2);
        else
                return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        new_val = dev_priv->pm_imr;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_imr) {
                dev_priv->pm_imr = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        lockdep_assert_held(&dev_priv->irq_lock);

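        /*
         * Write the mask twice: as noted for the reset macros above, IIR
         * can latch a second event behind the first, so one clear may not
         * be enough.
         */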
        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
        POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier |= enable_mask;
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        gen6_unmask_pm_irq(dev_priv, enable_mask);
        /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(dev_priv, disable_mask);
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        /* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
                ;

        dev_priv->gt_pm.rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
        dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON_ONCE(rps->pm_iir);

        if (INTEL_GEN(dev_priv) >= 11)
                WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
        else
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

        rps->interrupts_enabled = true;
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (!READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        rps->interrupts_enabled = false;

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

        gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        /* Now that we will not be generating any more work, flush any
         * outstanding tasks. As we are called on the RPS idle path,
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
        cancel_work_sync(&rps->work);
        if (INTEL_GEN(dev_priv) >= 11)
                gen11_reset_rps_interrupts(dev_priv);
        else
                gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        if (!dev_priv->guc.interrupts_enabled) {
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
                                       dev_priv->pm_guc_events);
                dev_priv->guc.interrupts_enabled = true;
                gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
        }
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
        assert_rpm_wakelock_held(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->guc.interrupts_enabled = false;

        gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

        spin_unlock_irq(&dev_priv->irq_lock);
        synchronize_irq(dev_priv->drm.irq);

        gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                uint32_t interrupt_mask,
                                uint32_t enabled_irq_mask)
{
        uint32_t new_val;
        uint32_t old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        old_val = I915_READ(GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
                POSTING_READ(GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                         enum pipe pipe,
                         uint32_t interrupt_mask,
                         uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  uint32_t interrupt_mask,
                                  uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);

        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
                              enum pipe pipe)
{
        u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
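        /*
         * PIPESTAT keeps the interrupt enable bits in its high 16 bits,
         * directly above the corresponding status bits in the low 16 bits,
         * hence the shift below.
         */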
        u32 enable_mask = status_mask << 16;

        lockdep_assert_held(&dev_priv->irq_lock);

        if (INTEL_GEN(dev_priv) < 5)
                goto out;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
        WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                  status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                  pipe_name(pipe), enable_mask, status_mask);

        return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
                          enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
                           enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
                  "pipe %c: status_mask=0x%x\n",
                  pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        I915_WRITE(reg, enable_mask | status_mask);
        POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_GEN(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
        unsigned long irqflags;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ_FW(low_frame);
                high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, either because the timings are
 * driven from the PORT or because of issues with scanline register
 * updates. This function instead uses the framestamp and the current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_vblank_crtc *vblank =
                &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        const struct drm_display_mode *mode = &vblank->hwmode;
        u32 vblank_start = mode->crtc_vblank_start;
        u32 vtotal = mode->crtc_vtotal;
        u32 htotal = mode->crtc_htotal;
        u32 clock = mode->crtc_clock;
        u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

        /*
         * To avoid the race condition where we might cross into the
         * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
         * during the same frame.
         */
        do {
                /*
                 * This field provides read back of the display
                 * pipe frame time stamp. The time stamp value
                 * is sampled at every start of vertical blank.
                 */
                scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

                /*
                 * The TIMESTAMP_CTR register has the current
                 * time stamp value.
                 */
                scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

                scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
        } while (scan_post_time != scan_prev_time);

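        /*
         * Time elapsed since the last start of vblank, scaled by the pixel
         * clock (crtc_clock is in kHz, hence the /1000) and divided by
         * htotal, gives the number of whole scanlines since vblank_start.
         */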
        scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
                                        clock), 1000 * htotal);
        scanline = min(scanline, vtotal - 1);
        scanline = (scanline + vblank_start) % vtotal;

        return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode;
        struct drm_vblank_crtc *vblank;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        if (!crtc->active)
                return -1;

        vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        mode = &vblank->hwmode;

        if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
                return __intel_get_crtc_scanline_from_timestamp(crtc);

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN(dev_priv, 2))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank.  So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (HAS_DDI(dev_priv) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                     bool in_vblank_irq, int *vpos, int *hpos,
                                     ktime_t *stime, ktime_t *etime,
                                     const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                pipe);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        unsigned long irqflags;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return false;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

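        /*
         * Note: on ILK the "delay" value moves opposite to frequency:
         * busy means decrement, idle means increment, and max_delay is
         * numerically the smallest permitted value.
         */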
        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev_priv, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *engine)
{
        const u32 seqno = intel_engine_get_seqno(engine);
        struct i915_request *rq = NULL;
        struct task_struct *tsk = NULL;
        struct intel_wait *wait;

        if (unlikely(!engine->breadcrumbs.irq_armed))
                return;

        rcu_read_lock();

        spin_lock(&engine->breadcrumbs.irq_lock);
        wait = engine->breadcrumbs.irq_wait;
        if (wait) {
                /*
                 * We use a callback from the dma-fence to submit
                 * requests after waiting on our own requests. To
                 * ensure minimum delay in queuing the next request to
                 * hardware, signal the fence now rather than wait for
                 * the signaler to be woken up. We still wake up the
                 * waiter in order to handle the irq-seqno coherency
                 * issues (we may receive the interrupt before the
                 * seqno is written, see __i915_request_irq_complete())
                 * and to handle coalescing of multiple seqno updates
                 * and many waiters.
                 */
                if (i915_seqno_passed(seqno, wait->seqno)) {
                        struct i915_request *waiter = wait->request;

                        if (waiter &&
                            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                      &waiter->fence.flags) &&
                            intel_wait_check_request(wait, waiter))
                                rq = i915_request_get(waiter);

                        tsk = wait->tsk;
                }

                engine->breadcrumbs.irq_count++;
        } else {
                if (engine->breadcrumbs.irq_armed)
                        __intel_engine_disarm_breadcrumbs(engine);
        }
        spin_unlock(&engine->breadcrumbs.irq_lock);

        if (rq) {
                spin_lock(&rq->lock);
                dma_fence_signal_locked(&rq->fence);
                GEM_BUG_ON(!i915_request_completed(rq));
                spin_unlock(&rq->lock);

                i915_request_put(rq);
        }

        if (tsk && tsk->state & TASK_NORMAL)
                wake_up_process(tsk);

        rcu_read_unlock();

        trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->ktime = ktime_get_raw();
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        const struct intel_rps_ei *prev = &rps->ei;
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);

        if (prev->ktime) {
                u64 time, c0;
                u32 render, media;

                time = ktime_us_delta(now.ktime, prev->ktime);

                time *= dev_priv->czclk_freq;

                /* Workload can be split between render + media,
                 * e.g. SwapBuffers being blitted in X after being rendered in
                 * mesa. To account for this we need to combine both engines
                 * into our activity counter.
                 */
                render = now.render_c0 - prev->render_c0;
                media = now.media_c0 - prev->media_c0;
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

                if (c0 > time * rps->power.up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
                else if (c0 < time * rps->power.down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }

        rps->ei = now;
        return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, gt_pm.rps.work);
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
        bool client_boost = false;
        int new_delay, adj, min, max;
        u32 pm_iir = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        if (rps->interrupts_enabled) {
                pm_iir = fetch_and_zero(&rps->pm_iir);
                client_boost = atomic_read(&rps->num_waiters);
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                goto out;

        mutex_lock(&dev_priv->pcu_lock);

        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

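        /*
         * adj carries the previous frequency step: consecutive up (or down)
         * threshold events grow the step exponentially (adj *= 2 below),
         * while boosts and unknown events reset it to zero.
         */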
        adj = rps->last_adj;
        new_delay = rps->cur_freq;
        min = rps->min_freq_softlimit;
        max = rps->max_freq_softlimit;
        if (client_boost)
                max = rps->max_freq;
        if (client_boost && new_delay < rps->boost_freq) {
                new_delay = rps->boost_freq;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

                if (new_delay >= rps->max_freq_softlimit)
                        adj = 0;
        } else if (client_boost) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (rps->cur_freq > rps->efficient_freq)
                        new_delay = rps->efficient_freq;
                else if (rps->cur_freq > rps->min_freq_softlimit)
                        new_delay = rps->min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

                if (new_delay <= rps->min_freq_softlimit)
                        adj = 0;
        } else { /* unknown event */
                adj = 0;
        }

        rps->last_adj = adj;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);

        if (intel_set_rps(dev_priv, new_delay)) {
                DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
                rps->last_adj = 0;
        }

        mutex_unlock(&dev_priv->pcu_lock);

out:
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        spin_lock_irq(&dev_priv->irq_lock);
        if (rps->interrupts_enabled)
                gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
1365 static void ivybridge_parity_work(struct work_struct *work)
1366 {
1367         struct drm_i915_private *dev_priv =
1368                 container_of(work, typeof(*dev_priv), l3_parity.error_work);
1369         u32 error_status, row, bank, subbank;
1370         char *parity_event[6];
1371         uint32_t misccpctl;
1372         uint8_t slice = 0;
1373
1374         /* We must turn off DOP level clock gating to access the L3 registers.
1375          * In order to prevent a get/put style interface, acquire struct mutex
1376          * any time we access those registers.
1377          */
1378         mutex_lock(&dev_priv->drm.struct_mutex);
1379
1380         /* If we've screwed up tracking, just let the interrupt fire again */
1381         if (WARN_ON(!dev_priv->l3_parity.which_slice))
1382                 goto out;
1383
1384         misccpctl = I915_READ(GEN7_MISCCPCTL);
1385         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1386         POSTING_READ(GEN7_MISCCPCTL);
1387
1388         while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1389                 i915_reg_t reg;
1390
1391                 slice--;
1392                 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1393                         break;
1394
1395                 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1396
1397                 reg = GEN7_L3CDERRST1(slice);
1398
1399                 error_status = I915_READ(reg);
1400                 row = GEN7_PARITY_ERROR_ROW(error_status);
1401                 bank = GEN7_PARITY_ERROR_BANK(error_status);
1402                 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1403
1404                 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1405                 POSTING_READ(reg);
1406
1407                 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1408                 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1409                 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1410                 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1411                 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1412                 parity_event[5] = NULL;
1413
1414                 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1415                                    KOBJ_CHANGE, parity_event);
1416
1417                 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1418                           slice, row, bank, subbank);
1419
1420                 kfree(parity_event[4]);
1421                 kfree(parity_event[3]);
1422                 kfree(parity_event[2]);
1423                 kfree(parity_event[1]);
1424         }
1425
1426         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1427
1428 out:
1429         WARN_ON(dev_priv->l3_parity.which_slice);
1430         spin_lock_irq(&dev_priv->irq_lock);
1431         gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1432         spin_unlock_irq(&dev_priv->irq_lock);
1433
1434         mutex_unlock(&dev_priv->drm.struct_mutex);
1435 }
1436
1437 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1438                                                u32 iir)
1439 {
1440         if (!HAS_L3_DPF(dev_priv))
1441                 return;
1442
1443         spin_lock(&dev_priv->irq_lock);
1444         gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1445         spin_unlock(&dev_priv->irq_lock);
1446
1447         iir &= GT_PARITY_ERROR(dev_priv);
1448         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1449                 dev_priv->l3_parity.which_slice |= 1 << 1;
1450
1451         if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1452                 dev_priv->l3_parity.which_slice |= 1 << 0;
1453
1454         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1455 }
1456
1457 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1458                                u32 gt_iir)
1459 {
1460         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1461                 notify_ring(dev_priv->engine[RCS]);
1462         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1463                 notify_ring(dev_priv->engine[VCS]);
1464 }
1465
1466 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1467                                u32 gt_iir)
1468 {
1469         if (gt_iir & GT_RENDER_USER_INTERRUPT)
1470                 notify_ring(dev_priv->engine[RCS]);
1471         if (gt_iir & GT_BSD_USER_INTERRUPT)
1472                 notify_ring(dev_priv->engine[VCS]);
1473         if (gt_iir & GT_BLT_USER_INTERRUPT)
1474                 notify_ring(dev_priv->engine[BCS]);
1475
1476         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1477                       GT_BSD_CS_ERROR_INTERRUPT |
1478                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1479                 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1480
1481         if (gt_iir & GT_PARITY_ERROR(dev_priv))
1482                 ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1483 }
1484
1485 static void
1486 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
1487 {
1488         bool tasklet = false;
1489
1490         if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
1491                 tasklet = true;
1492
1493         if (iir & GT_RENDER_USER_INTERRUPT) {
1494                 notify_ring(engine);
1495                 tasklet |= USES_GUC_SUBMISSION(engine->i915);
1496         }
1497
1498         if (tasklet)
1499                 tasklet_hi_schedule(&engine->execlists.tasklet);
1500 }
1501
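/*
 * Ack phase for gen8+ GT interrupts: read and clear only the IIR banks that
 * master_ctl flags as pending, stashing the values in gt_iir[] for
 * gen8_gt_irq_handler() below. The raw_reg_*() accessors bypass the usual
 * uncore bookkeeping to keep this hot irq path lean.
 */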
1502 static void gen8_gt_irq_ack(struct drm_i915_private *i915,
1503                             u32 master_ctl, u32 gt_iir[4])
1504 {
1505         void __iomem * const regs = i915->regs;
1506
1507 #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
1508                       GEN8_GT_BCS_IRQ | \
1509                       GEN8_GT_VCS1_IRQ | \
1510                       GEN8_GT_VCS2_IRQ | \
1511                       GEN8_GT_VECS_IRQ | \
1512                       GEN8_GT_PM_IRQ | \
1513                       GEN8_GT_GUC_IRQ)
1514
1515         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1516                 gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
1517                 if (likely(gt_iir[0]))
1518                         raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
1519         }
1520
1521         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1522                 gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
1523                 if (likely(gt_iir[1]))
1524                         raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
1525         }
1526
1527         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1528                 gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
1529                 if (likely(gt_iir[2]))
1530                         raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
1531         }
1532
1533         if (master_ctl & GEN8_GT_VECS_IRQ) {
1534                 gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
1535                 if (likely(gt_iir[3]))
1536                         raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
1537         }
1538 }
1539
1540 static void gen8_gt_irq_handler(struct drm_i915_private *i915,
1541                                 u32 master_ctl, u32 gt_iir[4])
1542 {
1543         if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1544                 gen8_cs_irq_handler(i915->engine[RCS],
1545                                     gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
1546                 gen8_cs_irq_handler(i915->engine[BCS],
1547                                     gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
1548         }
1549
1550         if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1551                 gen8_cs_irq_handler(i915->engine[VCS],
1552                                     gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
1553                 gen8_cs_irq_handler(i915->engine[VCS2],
1554                                     gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
1555         }
1556
1557         if (master_ctl & GEN8_GT_VECS_IRQ) {
1558                 gen8_cs_irq_handler(i915->engine[VECS],
1559                                     gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
1560         }
1561
1562         if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
1563                 gen6_rps_irq_handler(i915, gt_iir[2]);
1564                 gen9_guc_irq_handler(i915, gt_iir[2]);
1565         }
1566 }
1567
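/*
 * The *_port_hotplug_long_detect() helpers below all answer the same
 * question for their respective platform: given the latched hotplug control
 * value, was the pulse on this pin a long one (plug/unplug) rather than a
 * short one (e.g. a DP IRQ_HPD)? They are handed to intel_get_hpd_pins()
 * as the long_pulse_detect() callback.
 */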
1568 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1569 {
1570         switch (pin) {
1571         case HPD_PORT_C:
1572                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1573         case HPD_PORT_D:
1574                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1575         case HPD_PORT_E:
1576                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1577         case HPD_PORT_F:
1578                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1579         default:
1580                 return false;
1581         }
1582 }
1583
1584 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1585 {
1586         switch (pin) {
1587         case HPD_PORT_A:
1588                 return val & PORTA_HOTPLUG_LONG_DETECT;
1589         case HPD_PORT_B:
1590                 return val & PORTB_HOTPLUG_LONG_DETECT;
1591         case HPD_PORT_C:
1592                 return val & PORTC_HOTPLUG_LONG_DETECT;
1593         default:
1594                 return false;
1595         }
1596 }
1597
1598 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1599 {
1600         switch (pin) {
1601         case HPD_PORT_A:
1602                 return val & ICP_DDIA_HPD_LONG_DETECT;
1603         case HPD_PORT_B:
1604                 return val & ICP_DDIB_HPD_LONG_DETECT;
1605         default:
1606                 return false;
1607         }
1608 }
1609
1610 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1611 {
1612         switch (pin) {
1613         case HPD_PORT_C:
1614                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1615         case HPD_PORT_D:
1616                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1617         case HPD_PORT_E:
1618                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1619         case HPD_PORT_F:
1620                 return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1621         default:
1622                 return false;
1623         }
1624 }
1625
1626 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1627 {
1628         switch (pin) {
1629         case HPD_PORT_E:
1630                 return val & PORTE_HOTPLUG_LONG_DETECT;
1631         default:
1632                 return false;
1633         }
1634 }
1635
1636 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1637 {
1638         switch (pin) {
1639         case HPD_PORT_A:
1640                 return val & PORTA_HOTPLUG_LONG_DETECT;
1641         case HPD_PORT_B:
1642                 return val & PORTB_HOTPLUG_LONG_DETECT;
1643         case HPD_PORT_C:
1644                 return val & PORTC_HOTPLUG_LONG_DETECT;
1645         case HPD_PORT_D:
1646                 return val & PORTD_HOTPLUG_LONG_DETECT;
1647         default:
1648                 return false;
1649         }
1650 }
1651
1652 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1653 {
1654         switch (pin) {
1655         case HPD_PORT_A:
1656                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1657         default:
1658                 return false;
1659         }
1660 }
1661
1662 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1663 {
1664         switch (pin) {
1665         case HPD_PORT_B:
1666                 return val & PORTB_HOTPLUG_LONG_DETECT;
1667         case HPD_PORT_C:
1668                 return val & PORTC_HOTPLUG_LONG_DETECT;
1669         case HPD_PORT_D:
1670                 return val & PORTD_HOTPLUG_LONG_DETECT;
1671         default:
1672                 return false;
1673         }
1674 }
1675
1676 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1677 {
1678         switch (pin) {
1679         case HPD_PORT_B:
1680                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1681         case HPD_PORT_C:
1682                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1683         case HPD_PORT_D:
1684                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1685         default:
1686                 return false;
1687         }
1688 }
1689
1690 /*
1691  * Get a bit mask of pins that have triggered, and which ones may be long.
1692  * This can be called multiple times with the same masks to accumulate
1693  * hotplug detection results from several registers.
1694  *
1695  * Note that the caller is expected to zero out the masks initially.
1696  */
1697 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1698                                u32 *pin_mask, u32 *long_mask,
1699                                u32 hotplug_trigger, u32 dig_hotplug_reg,
1700                                const u32 hpd[HPD_NUM_PINS],
1701                                bool long_pulse_detect(enum hpd_pin pin, u32 val))
1702 {
1703         enum hpd_pin pin;
1704
1705         for_each_hpd_pin(pin) {
1706                 if ((hpd[pin] & hotplug_trigger) == 0)
1707                         continue;
1708
1709                 *pin_mask |= BIT(pin);
1710
1711                 if (long_pulse_detect(pin, dig_hotplug_reg))
1712                         *long_mask |= BIT(pin);
1713         }
1714
1715         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1716                          hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1717
1718 }
1719
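/*
 * Typical call site (sketch, mirroring the SPT handler further down):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg, hpd_spt,
 *			   spt_port_hotplug_long_detect);
 *	if (pin_mask)
 *		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */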
1720 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1721 {
1722         wake_up_all(&dev_priv->gmbus_wait_queue);
1723 }
1724
1725 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1726 {
1727         wake_up_all(&dev_priv->gmbus_wait_queue);
1728 }
1729
1730 #if defined(CONFIG_DEBUG_FS)
1731 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1732                                          enum pipe pipe,
1733                                          uint32_t crc0, uint32_t crc1,
1734                                          uint32_t crc2, uint32_t crc3,
1735                                          uint32_t crc4)
1736 {
1737         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1738         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1739         uint32_t crcs[5];
1740
1741         spin_lock(&pipe_crc->lock);
1742         /*
1743          * For some not yet identified reason, the first CRC is
1744          * bonkers. So let's just discard the buggy result and wait
1745          * for the next vblank before reporting CRCs.
1746          *
1747          * On GEN8+ sometimes the second CRC is bonkers as well, so
1748          * don't trust that one either.
1749          */
1750         if (pipe_crc->skipped <= 0 ||
1751             (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1752                 pipe_crc->skipped++;
1753                 spin_unlock(&pipe_crc->lock);
1754                 return;
1755         }
1756         spin_unlock(&pipe_crc->lock);
1757
1758         crcs[0] = crc0;
1759         crcs[1] = crc1;
1760         crcs[2] = crc2;
1761         crcs[3] = crc3;
1762         crcs[4] = crc4;
1763         drm_crtc_add_crc_entry(&crtc->base, true,
1764                                 drm_crtc_accurate_vblank_count(&crtc->base),
1765                                 crcs);
1766 }
1767 #else
1768 static inline void
1769 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1770                              enum pipe pipe,
1771                              uint32_t crc0, uint32_t crc1,
1772                              uint32_t crc2, uint32_t crc3,
1773                              uint32_t crc4) {}
1774 #endif
1775
1776
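/*
 * The per-platform wrappers below differ only in which CRC result registers
 * exist: HSW+ produces a single CRC per pipe, IVB exposes five result
 * registers, and the i9xx variant assembles the RGB results plus the
 * generation-dependent RES1/RES2 values.
 */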
1777 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1778                                      enum pipe pipe)
1779 {
1780         display_pipe_crc_irq_handler(dev_priv, pipe,
1781                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1782                                      0, 0, 0, 0);
1783 }
1784
1785 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1786                                      enum pipe pipe)
1787 {
1788         display_pipe_crc_irq_handler(dev_priv, pipe,
1789                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1790                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1791                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1792                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1793                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1794 }
1795
1796 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1797                                       enum pipe pipe)
1798 {
1799         uint32_t res1, res2;
1800
1801         if (INTEL_GEN(dev_priv) >= 3)
1802                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1803         else
1804                 res1 = 0;
1805
1806         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1807                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1808         else
1809                 res2 = 0;
1810
1811         display_pipe_crc_irq_handler(dev_priv, pipe,
1812                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1813                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1814                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1815                                      res1, res2);
1816 }
1817
1818 /* The RPS events need forcewake, so we add them to a work queue and mask their
1819  * IMR bits until the work is done. Other interrupts can be processed without
1820  * the work queue. */
1821 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1822 {
1823         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1824
1825         if (pm_iir & dev_priv->pm_rps_events) {
1826                 spin_lock(&dev_priv->irq_lock);
1827                 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1828                 if (rps->interrupts_enabled) {
1829                         rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
1830                         schedule_work(&rps->work);
1831                 }
1832                 spin_unlock(&dev_priv->irq_lock);
1833         }
1834
1835         if (INTEL_GEN(dev_priv) >= 8)
1836                 return;
1837
1838         if (HAS_VEBOX(dev_priv)) {
1839                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1840                         notify_ring(dev_priv->engine[VECS]);
1841
1842                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1843                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1844         }
1845 }
1846
1847 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1848 {
1849         if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
1850                 intel_guc_to_host_event_handler(&dev_priv->guc);
1851 }
1852
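/*
 * PIPESTAT handling for gen2-4 and VLV/CHV style display interrupts. Each
 * PIPESTAT register mixes enable bits (high half) and the corresponding
 * sticky status bits (low half), which is why the reset below always writes
 * back the full status mask for every pipe.
 */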
1853 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1854 {
1855         enum pipe pipe;
1856
1857         for_each_pipe(dev_priv, pipe) {
1858                 I915_WRITE(PIPESTAT(pipe),
1859                            PIPESTAT_INT_STATUS_MASK |
1860                            PIPE_FIFO_UNDERRUN_STATUS);
1861
1862                 dev_priv->pipestat_irq_mask[pipe] = 0;
1863         }
1864 }
1865
1866 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1867                                   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1868 {
1869         int pipe;
1870
1871         spin_lock(&dev_priv->irq_lock);
1872
1873         if (!dev_priv->display_irqs_enabled) {
1874                 spin_unlock(&dev_priv->irq_lock);
1875                 return;
1876         }
1877
1878         for_each_pipe(dev_priv, pipe) {
1879                 i915_reg_t reg;
1880                 u32 status_mask, enable_mask, iir_bit = 0;
1881
1882                 /*
1883                  * PIPESTAT bits get signalled even when the interrupt is
1884                  * disabled with the mask bits, and some of the status bits do
1885                  * not generate interrupts at all (like the underrun bit). Hence
1886                  * we need to be careful that we only handle what we want to
1887                  * handle.
1888                  */
1889
1890                 /* fifo underruns are filtered in the underrun handler. */
1891                 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1892
1893                 switch (pipe) {
1894                 case PIPE_A:
1895                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1896                         break;
1897                 case PIPE_B:
1898                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1899                         break;
1900                 case PIPE_C:
1901                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1902                         break;
1903                 }
1904                 if (iir & iir_bit)
1905                         status_mask |= dev_priv->pipestat_irq_mask[pipe];
1906
1907                 if (!status_mask)
1908                         continue;
1909
1910                 reg = PIPESTAT(pipe);
1911                 pipe_stats[pipe] = I915_READ(reg) & status_mask;
1912                 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1913
1914                 /*
1915                  * Clear the PIPE*STAT regs before the IIR
1916                  *
1917                  * Toggle the enable bits to make sure we get an
1918                  * edge in the ISR pipe event bit if we don't clear
1919                  * all the enabled status bits. Otherwise the edge
1920                  * triggered IIR on i965/g4x wouldn't notice that
1921                  * an interrupt is still pending.
1922                  */
1923                 if (pipe_stats[pipe]) {
1924                         I915_WRITE(reg, pipe_stats[pipe]);
1925                         I915_WRITE(reg, enable_mask);
1926                 }
1927         }
1928         spin_unlock(&dev_priv->irq_lock);
1929 }
1930
1931 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1932                                       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1933 {
1934         enum pipe pipe;
1935
1936         for_each_pipe(dev_priv, pipe) {
1937                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1938                         drm_handle_vblank(&dev_priv->drm, pipe);
1939
1940                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1941                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1942
1943                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1944                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1945         }
1946 }
1947
1948 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1949                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1950 {
1951         bool blc_event = false;
1952         enum pipe pipe;
1953
1954         for_each_pipe(dev_priv, pipe) {
1955                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1956                         drm_handle_vblank(&dev_priv->drm, pipe);
1957
1958                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1959                         blc_event = true;
1960
1961                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1962                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1963
1964                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1965                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1966         }
1967
1968         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1969                 intel_opregion_asle_intr(dev_priv);
1970 }
1971
1972 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1973                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1974 {
1975         bool blc_event = false;
1976         enum pipe pipe;
1977
1978         for_each_pipe(dev_priv, pipe) {
1979                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1980                         drm_handle_vblank(&dev_priv->drm, pipe);
1981
1982                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1983                         blc_event = true;
1984
1985                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1986                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1987
1988                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1989                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1990         }
1991
1992         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1993                 intel_opregion_asle_intr(dev_priv);
1994
1995         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1996                 gmbus_irq_handler(dev_priv);
1997 }
1998
1999 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2000                                             u32 pipe_stats[I915_MAX_PIPES])
2001 {
2002         enum pipe pipe;
2003
2004         for_each_pipe(dev_priv, pipe) {
2005                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2006                         drm_handle_vblank(&dev_priv->drm, pipe);
2007
2008                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2009                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2010
2011                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2012                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2013         }
2014
2015         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2016                 gmbus_irq_handler(dev_priv);
2017 }
2018
2019 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
2020 {
2021         u32 hotplug_status = 0, hotplug_status_mask;
2022         int i;
2023
2024         if (IS_G4X(dev_priv) ||
2025             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2026                 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
2027                         DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
2028         else
2029                 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
2030
2031         /*
2032          * We absolutely have to clear all the pending interrupt
2033          * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
2034          * interrupt bit won't have an edge, and the i965/g4x
2035          * edge triggered IIR will not notice that an interrupt
2036          * is still pending. We can't use PORT_HOTPLUG_EN to
2037          * guarantee the edge as the act of toggling the enable
2038          * bits can itself generate a new hotplug interrupt :(
2039          */
2040         for (i = 0; i < 10; i++) {
2041                 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
2042
2043                 if (tmp == 0)
2044                         return hotplug_status;
2045
2046                 hotplug_status |= tmp;
2047                 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2048         }
2049
2050         WARN_ONCE(1,
2051                   "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
2052                   I915_READ(PORT_HOTPLUG_STAT));
2053
2054         return hotplug_status;
2055 }
2056
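/*
 * Decode the accumulated PORT_HOTPLUG_STAT value: G4X, VLV and CHV share
 * one status layout (and also report DP AUX completion through this
 * register), while the older i915 layout only carries the hotplug bits.
 */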
2057 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2058                                  u32 hotplug_status)
2059 {
2060         u32 pin_mask = 0, long_mask = 0;
2061
2062         if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
2063             IS_CHERRYVIEW(dev_priv)) {
2064                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2065
2066                 if (hotplug_trigger) {
2067                         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2068                                            hotplug_trigger, hotplug_trigger,
2069                                            hpd_status_g4x,
2070                                            i9xx_port_hotplug_long_detect);
2071
2072                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2073                 }
2074
2075                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2076                         dp_aux_irq_handler(dev_priv);
2077         } else {
2078                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2079
2080                 if (hotplug_trigger) {
2081                         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2082                                            hotplug_trigger, hotplug_trigger,
2083                                            hpd_status_i915,
2084                                            i9xx_port_hotplug_long_detect);
2085                         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2086                 }
2087         }
2088 }
2089
2090 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2091 {
2092         struct drm_device *dev = arg;
2093         struct drm_i915_private *dev_priv = to_i915(dev);
2094         irqreturn_t ret = IRQ_NONE;
2095
2096         if (!intel_irqs_enabled(dev_priv))
2097                 return IRQ_NONE;
2098
2099         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2100         disable_rpm_wakeref_asserts(dev_priv);
2101
2102         do {
2103                 u32 iir, gt_iir, pm_iir;
2104                 u32 pipe_stats[I915_MAX_PIPES] = {};
2105                 u32 hotplug_status = 0;
2106                 u32 ier = 0;
2107
2108                 gt_iir = I915_READ(GTIIR);
2109                 pm_iir = I915_READ(GEN6_PMIIR);
2110                 iir = I915_READ(VLV_IIR);
2111
2112                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2113                         break;
2114
2115                 ret = IRQ_HANDLED;
2116
2117                 /*
2118                  * Theory on interrupt generation, based on empirical evidence:
2119                  *
2120                  * x = ((VLV_IIR & VLV_IER) ||
2121                  *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
2122                  *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
2123                  *
2124                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2125                  * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
2126                  * guarantee the CPU interrupt will be raised again even if we
2127                  * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
2128                  * bits this time around.
2129                  */
2130                 I915_WRITE(VLV_MASTER_IER, 0);
2131                 ier = I915_READ(VLV_IER);
2132                 I915_WRITE(VLV_IER, 0);
2133
2134                 if (gt_iir)
2135                         I915_WRITE(GTIIR, gt_iir);
2136                 if (pm_iir)
2137                         I915_WRITE(GEN6_PMIIR, pm_iir);
2138
2139                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2140                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2141
2142                 /* Call regardless, as some status bits might not be
2143                  * signalled in iir */
2144                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2145
2146                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2147                            I915_LPE_PIPE_B_INTERRUPT))
2148                         intel_lpe_audio_irq_handler(dev_priv);
2149
2150                 /*
2151                  * VLV_IIR is single buffered, and reflects the level
2152                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2153                  */
2154                 if (iir)
2155                         I915_WRITE(VLV_IIR, iir);
2156
2157                 I915_WRITE(VLV_IER, ier);
2158                 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2159
2160                 if (gt_iir)
2161                         snb_gt_irq_handler(dev_priv, gt_iir);
2162                 if (pm_iir)
2163                         gen6_rps_irq_handler(dev_priv, pm_iir);
2164
2165                 if (hotplug_status)
2166                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2167
2168                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2169         } while (0);
2170
2171         enable_rpm_wakeref_asserts(dev_priv);
2172
2173         return ret;
2174 }
2175
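/*
 * CHV keeps the VLV scheme of a single-buffered VLV_IIR on the display
 * side, but the GT side is gen8-style: GEN8_MASTER_IRQ plus four GT IIR
 * banks, acked up front via gen8_gt_irq_ack() and dispatched via
 * gen8_gt_irq_handler() once the interrupts have been re-enabled.
 */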
2176 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2177 {
2178         struct drm_device *dev = arg;
2179         struct drm_i915_private *dev_priv = to_i915(dev);
2180         irqreturn_t ret = IRQ_NONE;
2181
2182         if (!intel_irqs_enabled(dev_priv))
2183                 return IRQ_NONE;
2184
2185         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2186         disable_rpm_wakeref_asserts(dev_priv);
2187
2188         do {
2189                 u32 master_ctl, iir;
2190                 u32 pipe_stats[I915_MAX_PIPES] = {};
2191                 u32 hotplug_status = 0;
2192                 u32 gt_iir[4];
2193                 u32 ier = 0;
2194
2195                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2196                 iir = I915_READ(VLV_IIR);
2197
2198                 if (master_ctl == 0 && iir == 0)
2199                         break;
2200
2201                 ret = IRQ_HANDLED;
2202
2203                 /*
2204                  * Theory on interrupt generation, based on empirical evidence:
2205                  *
2206                  * x = ((VLV_IIR & VLV_IER) ||
2207                  *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2208                  *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2209                  *
2210                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2211                  * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2212                  * guarantee the CPU interrupt will be raised again even if we
2213                  * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2214                  * bits this time around.
2215                  */
2216                 I915_WRITE(GEN8_MASTER_IRQ, 0);
2217                 ier = I915_READ(VLV_IER);
2218                 I915_WRITE(VLV_IER, 0);
2219
2220                 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2221
2222                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2223                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
2224
2225                 /* Call regardless, as some status bits might not be
2226                  * signalled in iir */
2227                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
2228
2229                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2230                            I915_LPE_PIPE_B_INTERRUPT |
2231                            I915_LPE_PIPE_C_INTERRUPT))
2232                         intel_lpe_audio_irq_handler(dev_priv);
2233
2234                 /*
2235                  * VLV_IIR is single buffered, and reflects the level
2236                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2237                  */
2238                 if (iir)
2239                         I915_WRITE(VLV_IIR, iir);
2240
2241                 I915_WRITE(VLV_IER, ier);
2242                 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2243
2244                 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2245
2246                 if (hotplug_status)
2247                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2248
2249                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
2250         } while (0);
2251
2252         enable_rpm_wakeref_asserts(dev_priv);
2253
2254         return ret;
2255 }
2256
2257 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2258                                 u32 hotplug_trigger,
2259                                 const u32 hpd[HPD_NUM_PINS])
2260 {
2261         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2262
2263         /*
2264          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2265          * unless we touch the hotplug register, even if hotplug_trigger is
2266          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2267          * errors.
2268          */
2269         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2270         if (!hotplug_trigger) {
2271                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2272                         PORTD_HOTPLUG_STATUS_MASK |
2273                         PORTC_HOTPLUG_STATUS_MASK |
2274                         PORTB_HOTPLUG_STATUS_MASK;
2275                 dig_hotplug_reg &= ~mask;
2276         }
2277
2278         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2279         if (!hotplug_trigger)
2280                 return;
2281
2282         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2283                            dig_hotplug_reg, hpd,
2284                            pch_port_hotplug_long_detect);
2285
2286         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2287 }
2288
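/*
 * South display engine (PCH) interrupt decode for IBX: hotplug goes through
 * ibx_hpd_irq_handler() above, AUX and GMBUS wake their waiters, transcoder
 * FIFO underruns are reported, and the remaining audio/CRC bits are only
 * logged for debugging.
 */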
2289 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2290 {
2291         int pipe;
2292         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2293
2294         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
2295
2296         if (pch_iir & SDE_AUDIO_POWER_MASK) {
2297                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2298                                SDE_AUDIO_POWER_SHIFT);
2299                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2300                                  port_name(port));
2301         }
2302
2303         if (pch_iir & SDE_AUX_MASK)
2304                 dp_aux_irq_handler(dev_priv);
2305
2306         if (pch_iir & SDE_GMBUS)
2307                 gmbus_irq_handler(dev_priv);
2308
2309         if (pch_iir & SDE_AUDIO_HDCP_MASK)
2310                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2311
2312         if (pch_iir & SDE_AUDIO_TRANS_MASK)
2313                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2314
2315         if (pch_iir & SDE_POISON)
2316                 DRM_ERROR("PCH poison interrupt\n");
2317
2318         if (pch_iir & SDE_FDI_MASK)
2319                 for_each_pipe(dev_priv, pipe)
2320                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2321                                          pipe_name(pipe),
2322                                          I915_READ(FDI_RX_IIR(pipe)));
2323
2324         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2325                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2326
2327         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2328                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2329
2330         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2331                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
2332
2333         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2334                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
2335 }
2336
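/*
 * GEN7_ERR_INT and SERR_INT gather poison, FIFO underrun and (on the CPU
 * side) pipe CRC done indications; both are read, dispatched, and then
 * written back to clear the handled bits.
 */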
2337 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2338 {
2339         u32 err_int = I915_READ(GEN7_ERR_INT);
2340         enum pipe pipe;
2341
2342         if (err_int & ERR_INT_POISON)
2343                 DRM_ERROR("Poison interrupt\n");
2344
2345         for_each_pipe(dev_priv, pipe) {
2346                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2347                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2348
2349                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2350                         if (IS_IVYBRIDGE(dev_priv))
2351                                 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2352                         else
2353                                 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2354                 }
2355         }
2356
2357         I915_WRITE(GEN7_ERR_INT, err_int);
2358 }
2359
2360 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2361 {
2362         u32 serr_int = I915_READ(SERR_INT);
2363         enum pipe pipe;
2364
2365         if (serr_int & SERR_INT_POISON)
2366                 DRM_ERROR("PCH poison interrupt\n");
2367
2368         for_each_pipe(dev_priv, pipe)
2369                 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2370                         intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
2371
2372         I915_WRITE(SERR_INT, serr_int);
2373 }
2374
2375 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2376 {
2377         int pipe;
2378         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2379
2380         ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2381
2382         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2383                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2384                                SDE_AUDIO_POWER_SHIFT_CPT);
2385                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2386                                  port_name(port));
2387         }
2388
2389         if (pch_iir & SDE_AUX_MASK_CPT)
2390                 dp_aux_irq_handler(dev_priv);
2391
2392         if (pch_iir & SDE_GMBUS_CPT)
2393                 gmbus_irq_handler(dev_priv);
2394
2395         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2396                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2397
2398         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2399                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2400
2401         if (pch_iir & SDE_FDI_MASK_CPT)
2402                 for_each_pipe(dev_priv, pipe)
2403                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2404                                          pipe_name(pipe),
2405                                          I915_READ(FDI_RX_IIR(pipe)));
2406
2407         if (pch_iir & SDE_ERROR_CPT)
2408                 cpt_serr_int_handler(dev_priv);
2409 }
2410
2411 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2412 {
2413         u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2414         u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
2415         u32 pin_mask = 0, long_mask = 0;
2416
2417         if (ddi_hotplug_trigger) {
2418                 u32 dig_hotplug_reg;
2419
2420                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2421                 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2422
2423                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2424                                    ddi_hotplug_trigger,
2425                                    dig_hotplug_reg, hpd_icp,
2426                                    icp_ddi_port_hotplug_long_detect);
2427         }
2428
2429         if (tc_hotplug_trigger) {
2430                 u32 dig_hotplug_reg;
2431
2432                 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2433                 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2434
2435                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2436                                    tc_hotplug_trigger,
2437                                    dig_hotplug_reg, hpd_icp,
2438                                    icp_tc_port_hotplug_long_detect);
2439         }
2440
2441         if (pin_mask)
2442                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2443
2444         if (pch_iir & SDE_GMBUS_ICP)
2445                 gmbus_irq_handler(dev_priv);
2446 }
2447
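/*
 * SPT+ splits the hotplug status across two registers: ports A-D are
 * reported in PCH_PORT_HOTPLUG, while port E has its own bits in
 * PCH_PORT_HOTPLUG2, hence the two trigger masks and two detect callbacks
 * below.
 */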
2448 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2449 {
2450         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2451                 ~SDE_PORTE_HOTPLUG_SPT;
2452         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2453         u32 pin_mask = 0, long_mask = 0;
2454
2455         if (hotplug_trigger) {
2456                 u32 dig_hotplug_reg;
2457
2458                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2459                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2460
2461                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2462                                    hotplug_trigger, dig_hotplug_reg, hpd_spt,
2463                                    spt_port_hotplug_long_detect);
2464         }
2465
2466         if (hotplug2_trigger) {
2467                 u32 dig_hotplug_reg;
2468
2469                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2470                 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2471
2472                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2473                                    hotplug2_trigger, dig_hotplug_reg, hpd_spt,
2474                                    spt_port_hotplug2_long_detect);
2475         }
2476
2477         if (pin_mask)
2478                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2479
2480         if (pch_iir & SDE_GMBUS_CPT)
2481                 gmbus_irq_handler(dev_priv);
2482 }
2483
2484 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2485                                 u32 hotplug_trigger,
2486                                 const u32 hpd[HPD_NUM_PINS])
2487 {
2488         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2489
2490         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2491         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2492
2493         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2494                            dig_hotplug_reg, hpd,
2495                            ilk_port_hotplug_long_detect);
2496
2497         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2498 }
2499
2500 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2501                                     u32 de_iir)
2502 {
2503         enum pipe pipe;
2504         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2505
2506         if (hotplug_trigger)
2507                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2508
2509         if (de_iir & DE_AUX_CHANNEL_A)
2510                 dp_aux_irq_handler(dev_priv);
2511
2512         if (de_iir & DE_GSE)
2513                 intel_opregion_asle_intr(dev_priv);
2514
2515         if (de_iir & DE_POISON)
2516                 DRM_ERROR("Poison interrupt\n");
2517
2518         for_each_pipe(dev_priv, pipe) {
2519                 if (de_iir & DE_PIPE_VBLANK(pipe))
2520                         drm_handle_vblank(&dev_priv->drm, pipe);
2521
2522                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2523                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2524
2525                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2526                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2527         }
2528
2529         /* check event from PCH */
2530         if (de_iir & DE_PCH_EVENT) {
2531                 u32 pch_iir = I915_READ(SDEIIR);
2532
2533                 if (HAS_PCH_CPT(dev_priv))
2534                         cpt_irq_handler(dev_priv, pch_iir);
2535                 else
2536                         ibx_irq_handler(dev_priv, pch_iir);
2537
2538                 /* should clear PCH hotplug event before clear CPU irq */
2539                 I915_WRITE(SDEIIR, pch_iir);
2540         }
2541
2542         if (IS_GEN(dev_priv, 5) && (de_iir & DE_PCU_EVENT))
2543                 ironlake_rps_change_irq_handler(dev_priv);
2544 }
2545
2546 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2547                                     u32 de_iir)
2548 {
2549         enum pipe pipe;
2550         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2551
2552         if (hotplug_trigger)
2553                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2554
2555         if (de_iir & DE_ERR_INT_IVB)
2556                 ivb_err_int_handler(dev_priv);
2557
2558         if (de_iir & DE_EDP_PSR_INT_HSW) {
2559                 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2560
2561                 intel_psr_irq_handler(dev_priv, psr_iir);
2562                 I915_WRITE(EDP_PSR_IIR, psr_iir);
2563         }
2564
2565         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2566                 dp_aux_irq_handler(dev_priv);
2567
2568         if (de_iir & DE_GSE_IVB)
2569                 intel_opregion_asle_intr(dev_priv);
2570
2571         for_each_pipe(dev_priv, pipe) {
2572                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2573                         drm_handle_vblank(&dev_priv->drm, pipe);
2574         }
2575
2576         /* check event from PCH */
2577         if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2578                 u32 pch_iir = I915_READ(SDEIIR);
2579
2580                 cpt_irq_handler(dev_priv, pch_iir);
2581
2582                 /* clear PCH hotplug event before clear CPU irq */
2583                 I915_WRITE(SDEIIR, pch_iir);
2584         }
2585 }
2586
2587 /*
2588  * To handle irqs with the minimum potential races with fresh interrupts, we:
2589  * 1 - Disable Master Interrupt Control.
2590  * 2 - Find the source(s) of the interrupt.
2591  * 3 - Clear the Interrupt Identity bits (IIR).
2592  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2593  * 5 - Re-enable Master Interrupt Control.
2594  */
2595 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2596 {
2597         struct drm_device *dev = arg;
2598         struct drm_i915_private *dev_priv = to_i915(dev);
2599         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2600         irqreturn_t ret = IRQ_NONE;
2601
2602         if (!intel_irqs_enabled(dev_priv))
2603                 return IRQ_NONE;
2604
2605         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2606         disable_rpm_wakeref_asserts(dev_priv);
2607
2608         /* disable master interrupt before clearing iir  */
2609         de_ier = I915_READ(DEIER);
2610         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2611
2612         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2613          * interrupts will be stored on its back queue, and then we'll be
2614          * able to process them after we restore SDEIER (as soon as we restore
2615          * it, we'll get an interrupt if SDEIIR still has something to process
2616          * due to its back queue). */
2617         if (!HAS_PCH_NOP(dev_priv)) {
2618                 sde_ier = I915_READ(SDEIER);
2619                 I915_WRITE(SDEIER, 0);
2620         }
2621
2622         /* Find, clear, then process each source of interrupt */
2623
2624         gt_iir = I915_READ(GTIIR);
2625         if (gt_iir) {
2626                 I915_WRITE(GTIIR, gt_iir);
2627                 ret = IRQ_HANDLED;
2628                 if (INTEL_GEN(dev_priv) >= 6)
2629                         snb_gt_irq_handler(dev_priv, gt_iir);
2630                 else
2631                         ilk_gt_irq_handler(dev_priv, gt_iir);
2632         }
2633
2634         de_iir = I915_READ(DEIIR);
2635         if (de_iir) {
2636                 I915_WRITE(DEIIR, de_iir);
2637                 ret = IRQ_HANDLED;
2638                 if (INTEL_GEN(dev_priv) >= 7)
2639                         ivb_display_irq_handler(dev_priv, de_iir);
2640                 else
2641                         ilk_display_irq_handler(dev_priv, de_iir);
2642         }
2643
2644         if (INTEL_GEN(dev_priv) >= 6) {
2645                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2646                 if (pm_iir) {
2647                         I915_WRITE(GEN6_PMIIR, pm_iir);
2648                         ret = IRQ_HANDLED;
2649                         gen6_rps_irq_handler(dev_priv, pm_iir);
2650                 }
2651         }
2652
2653         I915_WRITE(DEIER, de_ier);
2654         if (!HAS_PCH_NOP(dev_priv))
2655                 I915_WRITE(SDEIER, sde_ier);
2656
2657         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2658         enable_rpm_wakeref_asserts(dev_priv);
2659
2660         return ret;
2661 }
2662
2663 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2664                                 u32 hotplug_trigger,
2665                                 const u32 hpd[HPD_NUM_PINS])
2666 {
2667         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2668
2669         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2670         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2671
2672         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2673                            dig_hotplug_reg, hpd,
2674                            bxt_port_hotplug_long_detect);
2675
2676         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2677 }
2678
2679 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2680 {
2681         u32 pin_mask = 0, long_mask = 0;
2682         u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2683         u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2684
2685         if (trigger_tc) {
2686                 u32 dig_hotplug_reg;
2687
2688                 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2689                 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2690
2691                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2692                                    dig_hotplug_reg, hpd_gen11,
2693                                    gen11_port_hotplug_long_detect);
2694         }
2695
2696         if (trigger_tbt) {
2697                 u32 dig_hotplug_reg;
2698
2699                 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2700                 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2701
2702                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2703                                    dig_hotplug_reg, hpd_gen11,
2704                                    gen11_port_hotplug_long_detect);
2705         }
2706
2707         if (pin_mask)
2708                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2709         else
2710                 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2711 }
2712
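/*
 * Display engine interrupt dispatch for gen8+: each sub-source (misc, gen11
 * HPD, port, per-pipe, PCH) sits behind its own IIR under GEN8_MASTER_IRQ
 * and is found, cleared and processed in turn. The "master control
 * interrupt lied" errors flag a source that was advertised in master_ctl
 * but whose IIR read back as zero.
 */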
2713 static irqreturn_t
2714 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2715 {
2716         irqreturn_t ret = IRQ_NONE;
2717         u32 iir;
2718         enum pipe pipe;
2719
2720         if (master_ctl & GEN8_DE_MISC_IRQ) {
2721                 iir = I915_READ(GEN8_DE_MISC_IIR);
2722                 if (iir) {
2723                         bool found = false;
2724
2725                         I915_WRITE(GEN8_DE_MISC_IIR, iir);
2726                         ret = IRQ_HANDLED;
2727
2728                         if (iir & GEN8_DE_MISC_GSE) {
2729                                 intel_opregion_asle_intr(dev_priv);
2730                                 found = true;
2731                         }
2732
2733                         if (iir & GEN8_DE_EDP_PSR) {
2734                                 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2735
2736                                 intel_psr_irq_handler(dev_priv, psr_iir);
2737                                 I915_WRITE(EDP_PSR_IIR, psr_iir);
2738                                 found = true;
2739                         }
2740
2741                         if (!found)
2742                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2743                 } else {
2744                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2745                 }
2746         }
2747
2748         if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2749                 iir = I915_READ(GEN11_DE_HPD_IIR);
2750                 if (iir) {
2751                         I915_WRITE(GEN11_DE_HPD_IIR, iir);
2752                         ret = IRQ_HANDLED;
2753                         gen11_hpd_irq_handler(dev_priv, iir);
2754                 } else {
2755                         DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2756                 }
2757         }
2758
2759         if (master_ctl & GEN8_DE_PORT_IRQ) {
2760                 iir = I915_READ(GEN8_DE_PORT_IIR);
2761                 if (iir) {
2762                         u32 tmp_mask;
2763                         bool found = false;
2764
2765                         I915_WRITE(GEN8_DE_PORT_IIR, iir);
2766                         ret = IRQ_HANDLED;
2767
2768                         tmp_mask = GEN8_AUX_CHANNEL_A;
2769                         if (INTEL_GEN(dev_priv) >= 9)
2770                                 tmp_mask |= GEN9_AUX_CHANNEL_B |
2771                                             GEN9_AUX_CHANNEL_C |
2772                                             GEN9_AUX_CHANNEL_D;
2773
2774                         if (INTEL_GEN(dev_priv) >= 11)
2775                                 tmp_mask |= ICL_AUX_CHANNEL_E;
2776
2777                         if (IS_CNL_WITH_PORT_F(dev_priv) ||
2778                             INTEL_GEN(dev_priv) >= 11)
2779                                 tmp_mask |= CNL_AUX_CHANNEL_F;
2780
2781                         if (iir & tmp_mask) {
2782                                 dp_aux_irq_handler(dev_priv);
2783                                 found = true;
2784                         }
2785
2786                         if (IS_GEN9_LP(dev_priv)) {
2787                                 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2788                                 if (tmp_mask) {
2789                                         bxt_hpd_irq_handler(dev_priv, tmp_mask,
2790                                                             hpd_bxt);
2791                                         found = true;
2792                                 }
2793                         } else if (IS_BROADWELL(dev_priv)) {
2794                                 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2795                                 if (tmp_mask) {
2796                                         ilk_hpd_irq_handler(dev_priv,
2797                                                             tmp_mask, hpd_bdw);
2798                                         found = true;
2799                                 }
2800                         }
2801
2802                         if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2803                                 gmbus_irq_handler(dev_priv);
2804                                 found = true;
2805                         }
2806
2807                         if (!found)
2808                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2809                 } else {
2810                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2811                 }
2812         }
2813
2814         for_each_pipe(dev_priv, pipe) {
2815                 u32 fault_errors;
2816
2817                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2818                         continue;
2819
2820                 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2821                 if (!iir) {
2822                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2823                         continue;
2824                 }
2825
2826                 ret = IRQ_HANDLED;
2827                 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2828
2829                 if (iir & GEN8_PIPE_VBLANK)
2830                         drm_handle_vblank(&dev_priv->drm, pipe);
2831
2832                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2833                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
2834
2835                 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2836                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2837
2838                 fault_errors = iir;
2839                 if (INTEL_GEN(dev_priv) >= 9)
2840                         fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2841                 else
2842                         fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2843
2844                 if (fault_errors)
2845                         DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2846                                   pipe_name(pipe),
2847                                   fault_errors);
2848         }
2849
2850         if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2851             master_ctl & GEN8_DE_PCH_IRQ) {
2852                 /*
2853                  * FIXME(BDW): Assume for now that the new interrupt handling
2854                  * scheme also closed the SDE interrupt handling race we've seen
2855                  * on older pch-split platforms. But this needs testing.
2856                  */
2857                 iir = I915_READ(SDEIIR);
2858                 if (iir) {
2859                         I915_WRITE(SDEIIR, iir);
2860                         ret = IRQ_HANDLED;
2861
2862                         if (HAS_PCH_ICP(dev_priv))
2863                                 icp_irq_handler(dev_priv, iir);
2864                         else if (HAS_PCH_SPT(dev_priv) ||
2865                                  HAS_PCH_KBP(dev_priv) ||
2866                                  HAS_PCH_CNP(dev_priv))
2867                                 spt_irq_handler(dev_priv, iir);
2868                         else
2869                                 cpt_irq_handler(dev_priv, iir);
2870                 } else {
2871                         /*
2872                          * Like on previous PCH there seems to be something
2873                          * fishy going on with forwarding PCH interrupts.
2874                          */
2875                         DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2876                 }
2877         }
2878
2879         return ret;
2880 }
2881
2882 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2883 {
2884         raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2885
2886         /*
2887          * Now with master disabled, get a sample of level indications
2888          * for this interrupt. Indications will be cleared on related acks.
2889          * New indications can and will light up during processing,
2890          * and will generate a new interrupt after the master is re-enabled.
2891          */
2892         return raw_reg_read(regs, GEN8_MASTER_IRQ);
2893 }
2894
2895 static inline void gen8_master_intr_enable(void __iomem * const regs)
2896 {
2897         raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2898 }
2899
2900 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2901 {
2902         struct drm_i915_private *dev_priv = to_i915(arg);
2903         void __iomem * const regs = dev_priv->regs;
2904         u32 master_ctl;
2905         u32 gt_iir[4];
2906
2907         if (!intel_irqs_enabled(dev_priv))
2908                 return IRQ_NONE;
2909
2910         master_ctl = gen8_master_intr_disable(regs);
2911         if (!master_ctl) {
2912                 gen8_master_intr_enable(regs);
2913                 return IRQ_NONE;
2914         }
2915
2916         /* Find, clear, then process each source of interrupt */
2917         gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2918
2919         /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
2920         if (master_ctl & ~GEN8_GT_IRQS) {
2921                 disable_rpm_wakeref_asserts(dev_priv);
2922                 gen8_de_irq_handler(dev_priv, master_ctl);
2923                 enable_rpm_wakeref_asserts(dev_priv);
2924         }
2925
2926         gen8_master_intr_enable(regs);
2927
2928         gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
2929
2930         return IRQ_HANDLED;
2931 }
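
/*
 * Editorial note, not part of the original file: gen8_gt_irq_ack() above
 * both reads and clears the GT IIRs into the gt_iir[] array while the
 * master interrupt is disabled. That is what allows the actual processing
 * in gen8_gt_irq_handler() to be deferred until after
 * gen8_master_intr_enable(), keeping the masked window as short as
 * possible.
 */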
2932
2933 struct wedge_me {
2934         struct delayed_work work;
2935         struct drm_i915_private *i915;
2936         const char *name;
2937 };
2938
2939 static void wedge_me(struct work_struct *work)
2940 {
2941         struct wedge_me *w = container_of(work, typeof(*w), work.work);
2942
2943         dev_err(w->i915->drm.dev,
2944                 "%s timed out, cancelling all in-flight rendering.\n",
2945                 w->name);
2946         i915_gem_set_wedged(w->i915);
2947 }
2948
2949 static void __init_wedge(struct wedge_me *w,
2950                          struct drm_i915_private *i915,
2951                          long timeout,
2952                          const char *name)
2953 {
2954         w->i915 = i915;
2955         w->name = name;
2956
2957         INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
2958         schedule_delayed_work(&w->work, timeout);
2959 }
2960
2961 static void __fini_wedge(struct wedge_me *w)
2962 {
2963         cancel_delayed_work_sync(&w->work);
2964         destroy_delayed_work_on_stack(&w->work);
2965         w->i915 = NULL;
2966 }
2967
2968 #define i915_wedge_on_timeout(W, DEV, TIMEOUT)                          \
2969         for (__init_wedge((W), (DEV), (TIMEOUT), __func__);             \
2970              (W)->i915;                                                 \
2971              __fini_wedge((W)))
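
/*
 * Illustrative sketch, not part of the original file: the macro above
 * behaves like a scoped watchdog. A hypothetical caller would look like:
 *
 *	struct wedge_me w;
 *
 *	i915_wedge_on_timeout(&w, dev_priv, 5 * HZ) {
 *		// if this block runs longer than 5 seconds, wedge_me()
 *		// fires and declares the GPU wedged
 *	}
 *
 * The body executes exactly once: __init_wedge() sets w.i915 so the
 * for-condition holds on the first pass, and __fini_wedge() clears it
 * (after cancelling the delayed work) so the loop terminates.
 */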
2972
2973 static u32
2974 gen11_gt_engine_identity(struct drm_i915_private * const i915,
2975                          const unsigned int bank, const unsigned int bit)
2976 {
2977         void __iomem * const regs = i915->regs;
2978         u32 timeout_ts;
2979         u32 ident;
2980
2981         lockdep_assert_held(&i915->irq_lock);
2982
2983         raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2984
2985         /*
2986          * NB: Specs do not specify how long to spin wait,
2987          * so we do ~100us as an educated guess.
2988          */
2989         timeout_ts = (local_clock() >> 10) + 100;
2990         do {
2991                 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2992         } while (!(ident & GEN11_INTR_DATA_VALID) &&
2993                  !time_after32(local_clock() >> 10, timeout_ts));
2994
2995         if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
2996                 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
2997                           bank, bit, ident);
2998                 return 0;
2999         }
3000
3001         raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
3002                       GEN11_INTR_DATA_VALID);
3003
3004         return ident;
3005 }
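
/*
 * Editorial note, not part of the original file, on the timeout arithmetic
 * above: local_clock() returns nanoseconds, and a right shift by 10
 * divides by 1024, which is close enough to dividing by 1000 for a
 * busy-wait budget. So:
 *
 *	timeout_ts = (local_clock() >> 10) + 100;
 *
 * reads as "now, in approximately microseconds, plus ~100us", matching
 * the educated guess mentioned in the comment.
 */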
3006
3007 static void
3008 gen11_other_irq_handler(struct drm_i915_private * const i915,
3009                         const u8 instance, const u16 iir)
3010 {
3011         if (instance == OTHER_GTPM_INSTANCE)
3012                 return gen6_rps_irq_handler(i915, iir);
3013
3014         WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3015                   instance, iir);
3016 }
3017
3018 static void
3019 gen11_engine_irq_handler(struct drm_i915_private * const i915,
3020                          const u8 class, const u8 instance, const u16 iir)
3021 {
3022         struct intel_engine_cs *engine;
3023
3024         if (instance <= MAX_ENGINE_INSTANCE)
3025                 engine = i915->engine_class[class][instance];
3026         else
3027                 engine = NULL;
3028
3029         if (likely(engine))
3030                 return gen8_cs_irq_handler(engine, iir);
3031
3032         WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3033                   class, instance);
3034 }
3035
3036 static void
3037 gen11_gt_identity_handler(struct drm_i915_private * const i915,
3038                           const u32 identity)
3039 {
3040         const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3041         const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3042         const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3043
3044         if (unlikely(!intr))
3045                 return;
3046
3047         if (class <= COPY_ENGINE_CLASS)
3048                 return gen11_engine_irq_handler(i915, class, instance, intr);
3049
3050         if (class == OTHER_CLASS)
3051                 return gen11_other_irq_handler(i915, instance, intr);
3052
3053         WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3054                   class, instance, intr);
3055 }
3056
3057 static void
3058 gen11_gt_bank_handler(struct drm_i915_private * const i915,
3059                       const unsigned int bank)
3060 {
3061         void __iomem * const regs = i915->regs;
3062         unsigned long intr_dw;
3063         unsigned int bit;
3064
3065         lockdep_assert_held(&i915->irq_lock);
3066
3067         intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3068
3069         if (unlikely(!intr_dw)) {
3070                 DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
3071                 return;
3072         }
3073
3074         for_each_set_bit(bit, &intr_dw, 32) {
3075                 const u32 ident = gen11_gt_engine_identity(i915,
3076                                                            bank, bit);
3077
3078                 gen11_gt_identity_handler(i915, ident);
3079         }
3080
3081         /* Clear must be after the shared IIR has been serviced for the engine */
3082         raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3083 }
3084
3085 static void
3086 gen11_gt_irq_handler(struct drm_i915_private * const i915,
3087                      const u32 master_ctl)
3088 {
3089         unsigned int bank;
3090
3091         spin_lock(&i915->irq_lock);
3092
3093         for (bank = 0; bank < 2; bank++) {
3094                 if (master_ctl & GEN11_GT_DW_IRQ(bank))
3095                         gen11_gt_bank_handler(i915, bank);
3096         }
3097
3098         spin_unlock(&i915->irq_lock);
3099 }
3100
3101 static u32
3102 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3103 {
3104         void __iomem * const regs = dev_priv->regs;
3105         u32 iir;
3106
3107         if (!(master_ctl & GEN11_GU_MISC_IRQ))
3108                 return 0;
3109
3110         iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3111         if (likely(iir))
3112                 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3113
3114         return iir;
3115 }
3116
3117 static void
3118 gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3119 {
3120         if (iir & GEN11_GU_MISC_GSE)
3121                 intel_opregion_asle_intr(dev_priv);
3122 }
3123
3124 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
3125 {
3126         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3127
3128         /*
3129          * Now with master disabled, get a sample of level indications
3130          * for this interrupt. Indications will be cleared on related acks.
3131          * New indications can and will light up during processing,
3132          * and will generate a new interrupt after the master is re-enabled.
3133          */
3134         return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3135 }
3136
3137 static inline void gen11_master_intr_enable(void __iomem * const regs)
3138 {
3139         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
3140 }
3141
3142 static irqreturn_t gen11_irq_handler(int irq, void *arg)
3143 {
3144         struct drm_i915_private * const i915 = to_i915(arg);
3145         void __iomem * const regs = i915->regs;
3146         u32 master_ctl;
3147         u32 gu_misc_iir;
3148
3149         if (!intel_irqs_enabled(i915))
3150                 return IRQ_NONE;
3151
3152         master_ctl = gen11_master_intr_disable(regs);
3153         if (!master_ctl) {
3154                 gen11_master_intr_enable(regs);
3155                 return IRQ_NONE;
3156         }
3157
3158         /* Find, clear, then process each source of interrupt. */
3159         gen11_gt_irq_handler(i915, master_ctl);
3160
3161         /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3162         if (master_ctl & GEN11_DISPLAY_IRQ) {
3163                 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3164
3165                 disable_rpm_wakeref_asserts(i915);
3166                 /*
3167                  * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3168                  * for the display related bits.
3169                  */
3170                 gen8_de_irq_handler(i915, disp_ctl);
3171                 enable_rpm_wakeref_asserts(i915);
3172         }
3173
3174         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3175
3176         gen11_master_intr_enable(regs);
3177
3178         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3179
3180         return IRQ_HANDLED;
3181 }
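
/*
 * Editorial note, not part of the original file: gen11_gu_misc_irq_ack()
 * is deliberately split from gen11_gu_misc_irq_handler(). The ack (IIR
 * read and write-back) happens while the master interrupt is still
 * disabled, whereas the handling, at most an opregion ASLE notification,
 * can safely run after gen11_master_intr_enable().
 */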
3182
3183 static void i915_reset_device(struct drm_i915_private *dev_priv,
3184                               u32 engine_mask,
3185                               const char *reason)
3186 {
3187         struct i915_gpu_error *error = &dev_priv->gpu_error;
3188         struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
3189         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
3190         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
3191         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
3192         struct wedge_me w;
3193
3194         kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
3195
3196         DRM_DEBUG_DRIVER("resetting chip\n");
3197         kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
3198
3199         /* Use a watchdog to ensure that our reset completes */
3200         i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
3201                 intel_prepare_reset(dev_priv);
3202
3203                 error->reason = reason;
3204                 error->stalled_mask = engine_mask;
3205
3206                 /* Signal that locked waiters should reset the GPU */
3207                 smp_mb__before_atomic();
3208                 set_bit(I915_RESET_HANDOFF, &error->flags);
3209                 wake_up_all(&error->wait_queue);
3210
3211                 /* Wait for anyone holding the lock to wake up, without
3212                  * blocking indefinitely on struct_mutex.
3213                  */
3214                 do {
3215                         if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
3216                                 i915_reset(dev_priv, engine_mask, reason);
3217                                 mutex_unlock(&dev_priv->drm.struct_mutex);
3218                         }
3219                 } while (wait_on_bit_timeout(&error->flags,
3220                                              I915_RESET_HANDOFF,
3221                                              TASK_UNINTERRUPTIBLE,
3222                                              1));
3223
3224                 error->stalled_mask = 0;
3225                 error->reason = NULL;
3226
3227                 intel_finish_reset(dev_priv);
3228         }
3229
3230         if (!test_bit(I915_WEDGED, &error->flags))
3231                 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
3232 }
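
/*
 * Editorial note, not part of the original file: the do/while loop above
 * keeps retrying mutex_trylock() + i915_reset() for as long as
 * wait_on_bit_timeout() reports that I915_RESET_HANDOFF is still set
 * (it returns 0 only once the bit is clear), so the loop runs until the
 * reset path completes the handoff.
 */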
3233
3234 void i915_clear_error_registers(struct drm_i915_private *dev_priv)
3235 {
3236         u32 eir;
3237
3238         if (!IS_GEN(dev_priv, 2))
3239                 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
3240
3241         if (INTEL_GEN(dev_priv) < 4)
3242                 I915_WRITE(IPEIR, I915_READ(IPEIR));
3243         else
3244                 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
3245
3246         I915_WRITE(EIR, I915_READ(EIR));
3247         eir = I915_READ(EIR);
3248         if (eir) {
3249                 /*
3250                  * some errors might have become stuck,
3251                  * Some errors might have become stuck;
3252                  * mask them.
3253                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
3254                 I915_WRITE(EMR, I915_READ(EMR) | eir);
3255                 I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
3256         }
3257
3258         if (INTEL_GEN(dev_priv) >= 8) {
3259                 I915_WRITE(GEN8_RING_FAULT_REG,
3260                            I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
3261                 POSTING_READ(GEN8_RING_FAULT_REG);
3262         } else if (INTEL_GEN(dev_priv) >= 6) {
3263                 struct intel_engine_cs *engine;
3264                 enum intel_engine_id id;
3265
3266                 for_each_engine(engine, dev_priv, id) {
3267                         I915_WRITE(RING_FAULT_REG(engine),
3268                                    I915_READ(RING_FAULT_REG(engine)) &
3269                                    ~RING_FAULT_VALID);
3270                 }
3271                 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
3272         }
3273 }
3274
3275 /**
3276  * i915_handle_error - handle a gpu error
3277  * @dev_priv: i915 device private
3278  * @engine_mask: mask representing engines that are hung
3279  * @flags: control flags
3280  * @fmt: Error message format string
3281  *
3282  * Do some basic checking of register state at error time and
3283  * dump it to the syslog.  Also call i915_capture_error_state() to make
3284  * sure we get a record and make it available in debugfs.  Fire a uevent
3285  * so userspace knows something bad happened (should trigger collection
3286  * of a ring dump etc.).
3287  */
3288 void i915_handle_error(struct drm_i915_private *dev_priv,
3289                        u32 engine_mask,
3290                        unsigned long flags,
3291                        const char *fmt, ...)
3292 {
3293         struct intel_engine_cs *engine;
3294         intel_wakeref_t wakeref;
3295         unsigned int tmp;
3296         char error_msg[80];
3297         char *msg = NULL;
3298
3299         if (fmt) {
3300                 va_list args;
3301
3302                 va_start(args, fmt);
3303                 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
3304                 va_end(args);
3305
3306                 msg = error_msg;
3307         }
3308
3309         /*
3310          * In most cases it's guaranteed that we get here with an RPM
3311          * reference held, for example because there is a pending GPU
3312          * request that won't finish until the reset is done. This
3313          * isn't the case at least when we get here by doing a
3314          * simulated reset via debugfs, so get an RPM reference.
3315          */
3316         wakeref = intel_runtime_pm_get(dev_priv);
3317
3318         engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
3319
3320         if (flags & I915_ERROR_CAPTURE) {
3321                 i915_capture_error_state(dev_priv, engine_mask, msg);
3322                 i915_clear_error_registers(dev_priv);
3323         }
3324
3325         /*
3326          * Try engine reset when available. We fall back to full reset if
3327          * single reset fails.
3328          */
3329         if (intel_has_reset_engine(dev_priv) &&
3330             !i915_terminally_wedged(&dev_priv->gpu_error)) {
3331                 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
3332                         BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
3333                         if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3334                                              &dev_priv->gpu_error.flags))
3335                                 continue;
3336
3337                         if (i915_reset_engine(engine, msg) == 0)
3338                                 engine_mask &= ~intel_engine_flag(engine);
3339
3340                         clear_bit(I915_RESET_ENGINE + engine->id,
3341                                   &dev_priv->gpu_error.flags);
3342                         wake_up_bit(&dev_priv->gpu_error.flags,
3343                                     I915_RESET_ENGINE + engine->id);
3344                 }
3345         }
3346
3347         if (!engine_mask)
3348                 goto out;
3349
3350         /* Full reset needs the mutex, stop any other user trying to do so. */
3351         if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
3352                 wait_event(dev_priv->gpu_error.reset_queue,
3353                            !test_bit(I915_RESET_BACKOFF,
3354                                      &dev_priv->gpu_error.flags));
3355                 goto out;
3356         }
3357
3358         /* Prevent any other reset-engine attempt. */
3359         for_each_engine(engine, dev_priv, tmp) {
3360                 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3361                                         &dev_priv->gpu_error.flags))
3362                         wait_on_bit(&dev_priv->gpu_error.flags,
3363                                     I915_RESET_ENGINE + engine->id,
3364                                     TASK_UNINTERRUPTIBLE);
3365         }
3366
3367         i915_reset_device(dev_priv, engine_mask, msg);
3368
3369         for_each_engine(engine, dev_priv, tmp) {
3370                 clear_bit(I915_RESET_ENGINE + engine->id,
3371                           &dev_priv->gpu_error.flags);
3372         }
3373
3374         clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
3375         wake_up_all(&dev_priv->gpu_error.reset_queue);
3376
3377 out:
3378         intel_runtime_pm_put(dev_priv, wakeref);
3379 }
3380
3381 /* Called from drm generic code, passed 'crtc' which
3382  * we use as a pipe index
3383  */
3384 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
3385 {
3386         struct drm_i915_private *dev_priv = to_i915(dev);
3387         unsigned long irqflags;
3388
3389         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3390         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3391         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3392
3393         return 0;
3394 }
3395
3396 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3397 {
3398         struct drm_i915_private *dev_priv = to_i915(dev);
3399         unsigned long irqflags;
3400
3401         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3402         i915_enable_pipestat(dev_priv, pipe,
3403                              PIPE_START_VBLANK_INTERRUPT_STATUS);
3404         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3405
3406         return 0;
3407 }
3408
3409 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
3410 {
3411         struct drm_i915_private *dev_priv = to_i915(dev);
3412         unsigned long irqflags;
3413         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3414                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3415
3416         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3417         ilk_enable_display_irq(dev_priv, bit);
3418         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3419
3420         /* Even though there is no DMC, frame counter can get stuck when
3421          * PSR is active as no frames are generated.
3422          */
3423         if (HAS_PSR(dev_priv))
3424                 drm_vblank_restore(dev, pipe);
3425
3426         return 0;
3427 }
3428
3429 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
3430 {
3431         struct drm_i915_private *dev_priv = to_i915(dev);
3432         unsigned long irqflags;
3433
3434         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3435         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3436         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3437
3438         /* Even if there is no DMC, frame counter can get stuck when
3439          * PSR is active as no frames are generated, so check only for PSR.
3440          */
3441         if (HAS_PSR(dev_priv))
3442                 drm_vblank_restore(dev, pipe);
3443
3444         return 0;
3445 }
3446
3447 /* Called from drm generic code, passed 'crtc' which
3448  * we use as a pipe index
3449  */
3450 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3451 {
3452         struct drm_i915_private *dev_priv = to_i915(dev);
3453         unsigned long irqflags;
3454
3455         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3456         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3457         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3458 }
3459
3460 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
3461 {
3462         struct drm_i915_private *dev_priv = to_i915(dev);
3463         unsigned long irqflags;
3464
3465         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3466         i915_disable_pipestat(dev_priv, pipe,
3467                               PIPE_START_VBLANK_INTERRUPT_STATUS);
3468         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3469 }
3470
3471 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
3472 {
3473         struct drm_i915_private *dev_priv = to_i915(dev);
3474         unsigned long irqflags;
3475         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
3476                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
3477
3478         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3479         ilk_disable_display_irq(dev_priv, bit);
3480         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3481 }
3482
3483 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
3484 {
3485         struct drm_i915_private *dev_priv = to_i915(dev);
3486         unsigned long irqflags;
3487
3488         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3489         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3490         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3491 }
3492
3493 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3494 {
3495         if (HAS_PCH_NOP(dev_priv))
3496                 return;
3497
3498         GEN3_IRQ_RESET(SDE);
3499
3500         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3501                 I915_WRITE(SERR_INT, 0xffffffff);
3502 }
3503
3504 /*
3505  * SDEIER is also touched by the interrupt handler to work around missed PCH
3506  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3507  * instead we unconditionally enable all PCH interrupt sources here, but then
3508  * only unmask them as needed with SDEIMR.
3509  *
3510  * This function needs to be called before interrupts are enabled.
3511  */
3512 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3513 {
3514         struct drm_i915_private *dev_priv = to_i915(dev);
3515
3516         if (HAS_PCH_NOP(dev_priv))
3517                 return;
3518
3519         WARN_ON(I915_READ(SDEIER) != 0);
3520         I915_WRITE(SDEIER, 0xffffffff);
3521         POSTING_READ(SDEIER);
3522 }
3523
3524 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
3525 {
3526         GEN3_IRQ_RESET(GT);
3527         if (INTEL_GEN(dev_priv) >= 6)
3528                 GEN3_IRQ_RESET(GEN6_PM);
3529 }
3530
3531 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3532 {
3533         if (IS_CHERRYVIEW(dev_priv))
3534                 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3535         else
3536                 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3537
3538         i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3539         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3540
3541         i9xx_pipestat_irq_reset(dev_priv);
3542
3543         GEN3_IRQ_RESET(VLV_);
3544         dev_priv->irq_mask = ~0u;
3545 }
3546
3547 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3548 {
3549         u32 pipestat_mask;
3550         u32 enable_mask;
3551         enum pipe pipe;
3552
3553         pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3554
3555         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3556         for_each_pipe(dev_priv, pipe)
3557                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3558
3559         enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3560                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3561                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3562                 I915_LPE_PIPE_A_INTERRUPT |
3563                 I915_LPE_PIPE_B_INTERRUPT;
3564
3565         if (IS_CHERRYVIEW(dev_priv))
3566                 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3567                         I915_LPE_PIPE_C_INTERRUPT;
3568
3569         WARN_ON(dev_priv->irq_mask != ~0u);
3570
3571         dev_priv->irq_mask = ~enable_mask;
3572
3573         GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3574 }
3575
3576 /* drm_dma.h hooks */
3577
3578 static void ironlake_irq_reset(struct drm_device *dev)
3579 {
3580         struct drm_i915_private *dev_priv = to_i915(dev);
3581
3582         GEN3_IRQ_RESET(DE);
3583         if (IS_GEN(dev_priv, 7))
3584                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3585
3586         if (IS_HASWELL(dev_priv)) {
3587                 I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3588                 I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3589         }
3590
3591         gen5_gt_irq_reset(dev_priv);
3592
3593         ibx_irq_reset(dev_priv);
3594 }
3595
3596 static void valleyview_irq_reset(struct drm_device *dev)
3597 {
3598         struct drm_i915_private *dev_priv = to_i915(dev);
3599
3600         I915_WRITE(VLV_MASTER_IER, 0);
3601         POSTING_READ(VLV_MASTER_IER);
3602
3603         gen5_gt_irq_reset(dev_priv);
3604
3605         spin_lock_irq(&dev_priv->irq_lock);
3606         if (dev_priv->display_irqs_enabled)
3607                 vlv_display_irq_reset(dev_priv);
3608         spin_unlock_irq(&dev_priv->irq_lock);
3609 }
3610
3611 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3612 {
3613         GEN8_IRQ_RESET_NDX(GT, 0);
3614         GEN8_IRQ_RESET_NDX(GT, 1);
3615         GEN8_IRQ_RESET_NDX(GT, 2);
3616         GEN8_IRQ_RESET_NDX(GT, 3);
3617 }
3618
3619 static void gen8_irq_reset(struct drm_device *dev)
3620 {
3621         struct drm_i915_private *dev_priv = to_i915(dev);
3622         int pipe;
3623
3624         gen8_master_intr_disable(dev_priv->regs);
3625
3626         gen8_gt_irq_reset(dev_priv);
3627
3628         I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3629         I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3630
3631         for_each_pipe(dev_priv, pipe)
3632                 if (intel_display_power_is_enabled(dev_priv,
3633                                                    POWER_DOMAIN_PIPE(pipe)))
3634                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3635
3636         GEN3_IRQ_RESET(GEN8_DE_PORT_);
3637         GEN3_IRQ_RESET(GEN8_DE_MISC_);
3638         GEN3_IRQ_RESET(GEN8_PCU_);
3639
3640         if (HAS_PCH_SPLIT(dev_priv))
3641                 ibx_irq_reset(dev_priv);
3642 }
3643
3644 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
3645 {
3646         /* Disable RCS, BCS, VCS and VECS class interrupts. */
3647         I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
3648         I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,    0);
3649
3650         /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
3651         I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,   ~0);
3652         I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,    ~0);
3653         I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,   ~0);
3654         I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,   ~0);
3655         I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
3656
3657         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
3658         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
3659 }
3660
3661 static void gen11_irq_reset(struct drm_device *dev)
3662 {
3663         struct drm_i915_private *dev_priv = to_i915(dev);
3664         int pipe;
3665
3666         gen11_master_intr_disable(dev_priv->regs);
3667
3668         gen11_gt_irq_reset(dev_priv);
3669
3670         I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
3671
3672         I915_WRITE(EDP_PSR_IMR, 0xffffffff);
3673         I915_WRITE(EDP_PSR_IIR, 0xffffffff);
3674
3675         for_each_pipe(dev_priv, pipe)
3676                 if (intel_display_power_is_enabled(dev_priv,
3677                                                    POWER_DOMAIN_PIPE(pipe)))
3678                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3679
3680         GEN3_IRQ_RESET(GEN8_DE_PORT_);
3681         GEN3_IRQ_RESET(GEN8_DE_MISC_);
3682         GEN3_IRQ_RESET(GEN11_DE_HPD_);
3683         GEN3_IRQ_RESET(GEN11_GU_MISC_);
3684         GEN3_IRQ_RESET(GEN8_PCU_);
3685
3686         if (HAS_PCH_ICP(dev_priv))
3687                 GEN3_IRQ_RESET(SDE);
3688 }
3689
3690 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3691                                      u8 pipe_mask)
3692 {
3693         u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3694         enum pipe pipe;
3695
3696         spin_lock_irq(&dev_priv->irq_lock);
3697
3698         if (!intel_irqs_enabled(dev_priv)) {
3699                 spin_unlock_irq(&dev_priv->irq_lock);
3700                 return;
3701         }
3702
3703         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3704                 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3705                                   dev_priv->de_irq_mask[pipe],
3706                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
3707
3708         spin_unlock_irq(&dev_priv->irq_lock);
3709 }
3710
3711 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3712                                      u8 pipe_mask)
3713 {
3714         enum pipe pipe;
3715
3716         spin_lock_irq(&dev_priv->irq_lock);
3717
3718         if (!intel_irqs_enabled(dev_priv)) {
3719                 spin_unlock_irq(&dev_priv->irq_lock);
3720                 return;
3721         }
3722
3723         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3724                 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3725
3726         spin_unlock_irq(&dev_priv->irq_lock);
3727
3728         /* make sure we're done processing display irqs */
3729         synchronize_irq(dev_priv->drm.irq);
3730 }
3731
3732 static void cherryview_irq_reset(struct drm_device *dev)
3733 {
3734         struct drm_i915_private *dev_priv = to_i915(dev);
3735
3736         I915_WRITE(GEN8_MASTER_IRQ, 0);
3737         POSTING_READ(GEN8_MASTER_IRQ);
3738
3739         gen8_gt_irq_reset(dev_priv);
3740
3741         GEN3_IRQ_RESET(GEN8_PCU_);
3742
3743         spin_lock_irq(&dev_priv->irq_lock);
3744         if (dev_priv->display_irqs_enabled)
3745                 vlv_display_irq_reset(dev_priv);
3746         spin_unlock_irq(&dev_priv->irq_lock);
3747 }
3748
3749 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3750                                   const u32 hpd[HPD_NUM_PINS])
3751 {
3752         struct intel_encoder *encoder;
3753         u32 enabled_irqs = 0;
3754
3755         for_each_intel_encoder(&dev_priv->drm, encoder)
3756                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3757                         enabled_irqs |= hpd[encoder->hpd_pin];
3758
3759         return enabled_irqs;
3760 }
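
/*
 * Worked example, editorial and not part of the original file: on
 * Broadwell this is called with the hpd_bdw[] table, so an encoder whose
 * hpd_pin is HPD_PORT_A and whose hotplug state is HPD_ENABLED
 * contributes hpd_bdw[HPD_PORT_A], i.e. GEN8_PORT_DP_A_HOTPLUG, to the
 * returned mask; pins in any other state contribute nothing.
 */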
3761
3762 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3763 {
3764         u32 hotplug;
3765
3766         /*
3767          * Enable digital hotplug on the PCH, and configure the DP short pulse
3768          * duration to 2ms (which is the minimum in the Display Port spec).
3769          * The pulse duration bits are reserved on LPT+.
3770          */
3771         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3772         hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3773                      PORTC_PULSE_DURATION_MASK |
3774                      PORTD_PULSE_DURATION_MASK);
3775         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3776         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3777         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3778         /*
3779          * When CPU and PCH are on the same package, port A
3780          * HPD must be enabled in both north and south.
3781          */
3782         if (HAS_PCH_LPT_LP(dev_priv))
3783                 hotplug |= PORTA_HOTPLUG_ENABLE;
3784         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3785 }
3786
3787 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3788 {
3789         u32 hotplug_irqs, enabled_irqs;
3790
3791         if (HAS_PCH_IBX(dev_priv)) {
3792                 hotplug_irqs = SDE_HOTPLUG_MASK;
3793                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3794         } else {
3795                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3796                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3797         }
3798
3799         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3800
3801         ibx_hpd_detection_setup(dev_priv);
3802 }
3803
3804 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
3805 {
3806         u32 hotplug;
3807
3808         hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3809         hotplug |= ICP_DDIA_HPD_ENABLE |
3810                    ICP_DDIB_HPD_ENABLE;
3811         I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3812
3813         hotplug = I915_READ(SHOTPLUG_CTL_TC);
3814         hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
3815                    ICP_TC_HPD_ENABLE(PORT_TC2) |
3816                    ICP_TC_HPD_ENABLE(PORT_TC3) |
3817                    ICP_TC_HPD_ENABLE(PORT_TC4);
3818         I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3819 }
3820
3821 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3822 {
3823         u32 hotplug_irqs, enabled_irqs;
3824
3825         hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
3826         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
3827
3828         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3829
3830         icp_hpd_detection_setup(dev_priv);
3831 }
3832
3833 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3834 {
3835         u32 hotplug;
3836
3837         hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3838         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3839                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3840                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3841                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3842         I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3843
3844         hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3845         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3846                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3847                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3848                    GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3849         I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3850 }
3851
3852 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3853 {
3854         u32 hotplug_irqs, enabled_irqs;
3855         u32 val;
3856
3857         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
3858         hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3859
3860         val = I915_READ(GEN11_DE_HPD_IMR);
3861         val &= ~hotplug_irqs;
3862         I915_WRITE(GEN11_DE_HPD_IMR, val);
3863         POSTING_READ(GEN11_DE_HPD_IMR);
3864
3865         gen11_hpd_detection_setup(dev_priv);
3866
3867         if (HAS_PCH_ICP(dev_priv))
3868                 icp_hpd_irq_setup(dev_priv);
3869 }
3870
3871 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3872 {
3873         u32 val, hotplug;
3874
3875         /* Display WA #1179 WaHardHangonHotPlug: cnp */
3876         if (HAS_PCH_CNP(dev_priv)) {
3877                 val = I915_READ(SOUTH_CHICKEN1);
3878                 val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3879                 val |= CHASSIS_CLK_REQ_DURATION(0xf);
3880                 I915_WRITE(SOUTH_CHICKEN1, val);
3881         }
3882
3883         /* Enable digital hotplug on the PCH */
3884         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3885         hotplug |= PORTA_HOTPLUG_ENABLE |
3886                    PORTB_HOTPLUG_ENABLE |
3887                    PORTC_HOTPLUG_ENABLE |
3888                    PORTD_HOTPLUG_ENABLE;
3889         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3890
3891         hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3892         hotplug |= PORTE_HOTPLUG_ENABLE;
3893         I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3894 }
3895
3896 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3897 {
3898         u32 hotplug_irqs, enabled_irqs;
3899
3900         hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3901         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3902
3903         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3904
3905         spt_hpd_detection_setup(dev_priv);
3906 }
3907
3908 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3909 {
3910         u32 hotplug;
3911
3912         /*
3913          * Enable digital hotplug on the CPU, and configure the DP short pulse
3914          * duration to 2ms (which is the minimum in the Display Port spec)
3915          * The pulse duration bits are reserved on HSW+.
3916          */
3917         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3918         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3919         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3920                    DIGITAL_PORTA_PULSE_DURATION_2ms;
3921         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3922 }
3923
3924 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3925 {
3926         u32 hotplug_irqs, enabled_irqs;
3927
3928         if (INTEL_GEN(dev_priv) >= 8) {
3929                 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3930                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3931
3932                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3933         } else if (INTEL_GEN(dev_priv) >= 7) {
3934                 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3935                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3936
3937                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3938         } else {
3939                 hotplug_irqs = DE_DP_A_HOTPLUG;
3940                 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3941
3942                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3943         }
3944
3945         ilk_hpd_detection_setup(dev_priv);
3946
3947         ibx_hpd_irq_setup(dev_priv);
3948 }
3949
3950 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3951                                       u32 enabled_irqs)
3952 {
3953         u32 hotplug;
3954
3955         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3956         hotplug |= PORTA_HOTPLUG_ENABLE |
3957                    PORTB_HOTPLUG_ENABLE |
3958                    PORTC_HOTPLUG_ENABLE;
3959
3960         DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3961                       hotplug, enabled_irqs);
3962         hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3963
3964         /*
3965          * For BXT the invert bit has to be set based on the AOB design
3966          * for the HPD detection logic; update it based on the VBT fields.
3967          */
3968         if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3969             intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3970                 hotplug |= BXT_DDIA_HPD_INVERT;
3971         if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3972             intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3973                 hotplug |= BXT_DDIB_HPD_INVERT;
3974         if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3975             intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3976                 hotplug |= BXT_DDIC_HPD_INVERT;
3977
3978         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3979 }
3980
3981 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3982 {
3983         __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3984 }
3985
3986 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3987 {
3988         u32 hotplug_irqs, enabled_irqs;
3989
3990         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3991         hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3992
3993         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3994
3995         __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3996 }
3997
3998 static void ibx_irq_postinstall(struct drm_device *dev)
3999 {
4000         struct drm_i915_private *dev_priv = to_i915(dev);
4001         u32 mask;
4002
4003         if (HAS_PCH_NOP(dev_priv))
4004                 return;
4005
4006         if (HAS_PCH_IBX(dev_priv))
4007                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
4008         else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
4009                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
4010         else
4011                 mask = SDE_GMBUS_CPT;
4012
4013         gen3_assert_iir_is_zero(dev_priv, SDEIIR);
4014         I915_WRITE(SDEIMR, ~mask);
4015
4016         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
4017             HAS_PCH_LPT(dev_priv))
4018                 ibx_hpd_detection_setup(dev_priv);
4019         else
4020                 spt_hpd_detection_setup(dev_priv);
4021 }
4022
4023 static void gen5_gt_irq_postinstall(struct drm_device *dev)
4024 {
4025         struct drm_i915_private *dev_priv = to_i915(dev);
4026         u32 pm_irqs, gt_irqs;
4027
4028         pm_irqs = gt_irqs = 0;
4029
4030         dev_priv->gt_irq_mask = ~0;
4031         if (HAS_L3_DPF(dev_priv)) {
4032                 /* L3 parity interrupt is always unmasked. */
4033                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
4034                 gt_irqs |= GT_PARITY_ERROR(dev_priv);
4035         }
4036
4037         gt_irqs |= GT_RENDER_USER_INTERRUPT;
4038         if (IS_GEN(dev_priv, 5)) {
4039                 gt_irqs |= ILK_BSD_USER_INTERRUPT;
4040         } else {
4041                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
4042         }
4043
4044         GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
4045
4046         if (INTEL_GEN(dev_priv) >= 6) {
4047                 /*
4048                  * RPS interrupts will get enabled/disabled on demand when RPS
4049                  * itself is enabled/disabled.
4050                  */
4051                 if (HAS_VEBOX(dev_priv)) {
4052                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
4053                         dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
4054                 }
4055
4056                 dev_priv->pm_imr = 0xffffffff;
4057                 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
4058         }
4059 }
4060
4061 static int ironlake_irq_postinstall(struct drm_device *dev)
4062 {
4063         struct drm_i915_private *dev_priv = to_i915(dev);
4064         u32 display_mask, extra_mask;
4065
4066         if (INTEL_GEN(dev_priv) >= 7) {
4067                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
4068                                 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
4069                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
4070                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
4071                               DE_DP_A_HOTPLUG_IVB);
4072         } else {
4073                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
4074                                 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
4075                                 DE_PIPEA_CRC_DONE | DE_POISON);
4076                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
4077                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
4078                               DE_DP_A_HOTPLUG);
4079         }
4080
4081         if (IS_HASWELL(dev_priv)) {
4082                 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4083                 intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4084                 display_mask |= DE_EDP_PSR_INT_HSW;
4085         }
4086
4087         dev_priv->irq_mask = ~display_mask;
4088
4089         ibx_irq_pre_postinstall(dev);
4090
4091         GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
4092
4093         gen5_gt_irq_postinstall(dev);
4094
4095         ilk_hpd_detection_setup(dev_priv);
4096
4097         ibx_irq_postinstall(dev);
4098
4099         if (IS_IRONLAKE_M(dev_priv)) {
4100                 /* Enable PCU event interrupts
4101                  *
4102                  * spinlocking not required here for correctness since interrupt
4103                  * setup is guaranteed to run in single-threaded context. But we
4104                  * need it to make the assert_spin_locked happy. */
4105                 spin_lock_irq(&dev_priv->irq_lock);
4106                 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
4107                 spin_unlock_irq(&dev_priv->irq_lock);
4108         }
4109
4110         return 0;
4111 }
4112
4113 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
4114 {
4115         lockdep_assert_held(&dev_priv->irq_lock);
4116
4117         if (dev_priv->display_irqs_enabled)
4118                 return;
4119
4120         dev_priv->display_irqs_enabled = true;
4121
4122         if (intel_irqs_enabled(dev_priv)) {
4123                 vlv_display_irq_reset(dev_priv);
4124                 vlv_display_irq_postinstall(dev_priv);
4125         }
4126 }
4127
4128 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
4129 {
4130         lockdep_assert_held(&dev_priv->irq_lock);
4131
4132         if (!dev_priv->display_irqs_enabled)
4133                 return;
4134
4135         dev_priv->display_irqs_enabled = false;
4136
4137         if (intel_irqs_enabled(dev_priv))
4138                 vlv_display_irq_reset(dev_priv);
4139 }
4140
4141
4142 static int valleyview_irq_postinstall(struct drm_device *dev)
4143 {
4144         struct drm_i915_private *dev_priv = to_i915(dev);
4145
4146         gen5_gt_irq_postinstall(dev);
4147
4148         spin_lock_irq(&dev_priv->irq_lock);
4149         if (dev_priv->display_irqs_enabled)
4150                 vlv_display_irq_postinstall(dev_priv);
4151         spin_unlock_irq(&dev_priv->irq_lock);
4152
4153         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
4154         POSTING_READ(VLV_MASTER_IER);
4155
4156         return 0;
4157 }
4158
4159 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4160 {
4161         /* These are interrupts we'll toggle with the ring mask register */
4162         u32 gt_interrupts[] = {
4163                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4164                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
4165                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
4166                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
4167                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4168                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
4169                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
4170                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
4171                 0,
4172                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
4173                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
4174                 };
4175
4176         dev_priv->pm_ier = 0x0;
4177         dev_priv->pm_imr = ~dev_priv->pm_ier;
4178         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
4179         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
4180         /*
4181          * RPS interrupts will get enabled/disabled on demand when RPS itself
4182          * is enabled/disabled. The same will be the case for GuC interrupts.
4183          */
4184         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
4185         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
4186 }
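
/*
 * Editorial note, not part of the original file: each 32-bit GT
 * IMR/IER/IIR register packs two engines, one per 16-bit half, which is
 * what the *_IRQ_SHIFT values express. In gt_interrupts[] above, GT(0)
 * carries RCS and BCS, GT(1) carries VCS1 and VCS2, and GT(3) carries
 * VECS; for example:
 *
 *	GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT
 *
 * places the user-interrupt bit in the BCS half of the GT(0) registers.
 */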
4187
4188 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
4189 {
4190         u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
4191         u32 de_pipe_enables;
4192         u32 de_port_masked = GEN8_AUX_CHANNEL_A;
4193         u32 de_port_enables;
4194         u32 de_misc_masked = GEN8_DE_EDP_PSR;
4195         enum pipe pipe;
4196
4197         if (INTEL_GEN(dev_priv) <= 10)
4198                 de_misc_masked |= GEN8_DE_MISC_GSE;
4199
4200         if (INTEL_GEN(dev_priv) >= 9) {
4201                 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
4202                 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
4203                                   GEN9_AUX_CHANNEL_D;
4204                 if (IS_GEN9_LP(dev_priv))
4205                         de_port_masked |= BXT_DE_PORT_GMBUS;
4206         } else {
4207                 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
4208         }
4209
4210         if (INTEL_GEN(dev_priv) >= 11)
4211                 de_port_masked |= ICL_AUX_CHANNEL_E;
4212
4213         if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
4214                 de_port_masked |= CNL_AUX_CHANNEL_F;
4215
4216         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
4217                                            GEN8_PIPE_FIFO_UNDERRUN;
4218
4219         de_port_enables = de_port_masked;
4220         if (IS_GEN9_LP(dev_priv))
4221                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
4222         else if (IS_BROADWELL(dev_priv))
4223                 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
4224
4225         gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
4226         intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
4227
4228         for_each_pipe(dev_priv, pipe) {
4229                 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
4230
4231                 if (intel_display_power_is_enabled(dev_priv,
4232                                 POWER_DOMAIN_PIPE(pipe)))
4233                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
4234                                           dev_priv->de_irq_mask[pipe],
4235                                           de_pipe_enables);
4236         }
4237
4238         GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
4239         GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
4240
4241         if (INTEL_GEN(dev_priv) >= 11) {
4242                 u32 de_hpd_masked = 0;
4243                 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
4244                                      GEN11_DE_TBT_HOTPLUG_MASK;
4245
4246                 GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
4247                 gen11_hpd_detection_setup(dev_priv);
4248         } else if (IS_GEN9_LP(dev_priv)) {
4249                 bxt_hpd_detection_setup(dev_priv);
4250         } else if (IS_BROADWELL(dev_priv)) {
4251                 ilk_hpd_detection_setup(dev_priv);
4252         }
4253 }
4254
4255 static int gen8_irq_postinstall(struct drm_device *dev)
4256 {
4257         struct drm_i915_private *dev_priv = to_i915(dev);
4258
4259         if (HAS_PCH_SPLIT(dev_priv))
4260                 ibx_irq_pre_postinstall(dev);
4261
4262         gen8_gt_irq_postinstall(dev_priv);
4263         gen8_de_irq_postinstall(dev_priv);
4264
4265         if (HAS_PCH_SPLIT(dev_priv))
4266                 ibx_irq_postinstall(dev);
4267
4268         gen8_master_intr_enable(dev_priv->regs);
4269
4270         return 0;
4271 }
4272
4273 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
4274 {
4275         const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
4276
4277         BUILD_BUG_ON(irqs & 0xffff0000);
4278
4279         /* Enable RCS, BCS, VCS and VECS class interrupts. */
4280         I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
4281         I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE,    irqs << 16 | irqs);
4282
4283         /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
4284         I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK,   ~(irqs << 16));
4285         I915_WRITE(GEN11_BCS_RSVD_INTR_MASK,    ~(irqs << 16));
4286         I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK,   ~(irqs | irqs << 16));
4287         I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK,   ~(irqs | irqs << 16));
4288         I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
4289
4290         /*
4291          * RPS interrupts will get enabled/disabled on demand when RPS itself
4292          * is enabled/disabled.
4293          */
4294         dev_priv->pm_ier = 0x0;
4295         dev_priv->pm_imr = ~dev_priv->pm_ier;
4296         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
4297         I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
4298 }
4299
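/*
 * ICP south display setup: SDEIER is checked to be zero and then fully
 * enabled (mirroring ibx_irq_pre_postinstall()) before unmasking just
 * the GMBUS interrupt.
 */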
4300 static void icp_irq_postinstall(struct drm_device *dev)
4301 {
4302         struct drm_i915_private *dev_priv = to_i915(dev);
4303         u32 mask = SDE_GMBUS_ICP;
4304
4305         WARN_ON(I915_READ(SDEIER) != 0);
4306         I915_WRITE(SDEIER, 0xffffffff);
4307         POSTING_READ(SDEIER);
4308
4309         gen3_assert_iir_is_zero(dev_priv, SDEIIR);
4310         I915_WRITE(SDEIMR, ~mask);
4311
4312         icp_hpd_detection_setup(dev_priv);
4313 }
4314
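/*
 * Gen11 top level: PCH first, then GT and DE, then the GU_MISC block
 * for the GSE interrupt, and finally the display and master interrupt
 * enables.
 */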
4315 static int gen11_irq_postinstall(struct drm_device *dev)
4316 {
4317         struct drm_i915_private *dev_priv = to_i915(dev);
4318         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
4319
4320         if (HAS_PCH_ICP(dev_priv))
4321                 icp_irq_postinstall(dev);
4322
4323         gen11_gt_irq_postinstall(dev_priv);
4324         gen8_de_irq_postinstall(dev_priv);
4325
4326         GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
4327
4328         I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
4329
4330         gen11_master_intr_enable(dev_priv->regs);
4331
4332         return 0;
4333 }
4334
4335 static int cherryview_irq_postinstall(struct drm_device *dev)
4336 {
4337         struct drm_i915_private *dev_priv = to_i915(dev);
4338
4339         gen8_gt_irq_postinstall(dev_priv);
4340
4341         spin_lock_irq(&dev_priv->irq_lock);
4342         if (dev_priv->display_irqs_enabled)
4343                 vlv_display_irq_postinstall(dev_priv);
4344         spin_unlock_irq(&dev_priv->irq_lock);
4345
4346         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
4347         POSTING_READ(GEN8_MASTER_IRQ);
4348
4349         return 0;
4350 }
4351
4352 static void i8xx_irq_reset(struct drm_device *dev)
4353 {
4354         struct drm_i915_private *dev_priv = to_i915(dev);
4355
4356         i9xx_pipestat_irq_reset(dev_priv);
4357
4358         GEN2_IRQ_RESET();
4359 }
4360
4361 static int i8xx_irq_postinstall(struct drm_device *dev)
4362 {
4363         struct drm_i915_private *dev_priv = to_i915(dev);
4364         u16 enable_mask;
4365
4366         I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
4367                             I915_ERROR_MEMORY_REFRESH));
4368
4369         /* Unmask the interrupts that we always want on. */
4370         dev_priv->irq_mask =
4371                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4372                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4373                   I915_MASTER_ERROR_INTERRUPT);
4374
4375         enable_mask =
4376                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4377                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4378                 I915_MASTER_ERROR_INTERRUPT |
4379                 I915_USER_INTERRUPT;
4380
4381         GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4382
4383         /* Interrupt setup is already guaranteed to be single-threaded; this is
4384          * just to make the assert_spin_locked check happy. */
4385         spin_lock_irq(&dev_priv->irq_lock);
4386         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4387         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4388         spin_unlock_irq(&dev_priv->irq_lock);
4389
4390         return 0;
4391 }
4392
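/*
 * Latch and clear the error status: *eir returns the bits that were
 * asserted, *eir_stuck the bits that survived the clear and therefore
 * have to be masked off in EMR (see the comment below).
 */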
4393 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
4394                                u16 *eir, u16 *eir_stuck)
4395 {
4396         u16 emr;
4397
4398         *eir = I915_READ16(EIR);
4399
4400         if (*eir)
4401                 I915_WRITE16(EIR, *eir);
4402
4403         *eir_stuck = I915_READ16(EIR);
4404         if (*eir_stuck == 0)
4405                 return;
4406
4407         /*
4408          * Toggle all EMR bits to make sure we get an edge
4409          * in the ISR master error bit if we don't clear
4410          * all the EIR bits. Otherwise the edge triggered
4411          * IIR on i965/g4x wouldn't notice that an interrupt
4412          * is still pending. Also some EIR bits can't be
4413          * cleared except by handling the underlying error
4414          * (or by a GPU reset) so we mask any bit that
4415          * remains set.
4416          */
4417         emr = I915_READ16(EMR);
4418         I915_WRITE16(EMR, 0xffff);
4419         I915_WRITE16(EMR, emr | *eir_stuck);
4420 }
4421
4422 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
4423                                    u16 eir, u16 eir_stuck)
4424 {
4425         DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
4426
4427         if (eir_stuck)
4428                 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
4429 }
4430
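/* 32-bit counterpart of i8xx_error_irq_ack(), same stuck-bit handling. */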
4431 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4432                                u32 *eir, u32 *eir_stuck)
4433 {
4434         u32 emr;
4435
4436         *eir = I915_READ(EIR);
4437
4438         I915_WRITE(EIR, *eir);
4439
4440         *eir_stuck = I915_READ(EIR);
4441         if (*eir_stuck == 0)
4442                 return;
4443
4444         /*
4445          * Toggle all EMR bits to make sure we get an edge
4446          * in the ISR master error bit if we don't clear
4447          * all the EIR bits. Otherwise the edge triggered
4448          * IIR on i965/g4x wouldn't notice that an interrupt
4449          * is still pending. Also some EIR bits can't be
4450          * cleared except by handling the underlying error
4451          * (or by a GPU reset) so we mask any bit that
4452          * remains set.
4453          */
4454         emr = I915_READ(EMR);
4455         I915_WRITE(EMR, 0xffffffff);
4456         I915_WRITE(EMR, emr | *eir_stuck);
4457 }
4458
4459 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4460                                    u32 eir, u32 eir_stuck)
4461 {
4462         DRM_DEBUG("Master Error: EIR 0x%08x\n", eir);
4463
4464         if (eir_stuck)
4465                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
4466 }
4467
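/*
 * Gen2 interrupt handler: IIR and the pipestat/error status are acked
 * up front, so that events arriving while we handle them re-assert IIR
 * and raise a fresh interrupt instead of being lost.
 */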
4468 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4469 {
4470         struct drm_device *dev = arg;
4471         struct drm_i915_private *dev_priv = to_i915(dev);
4472         irqreturn_t ret = IRQ_NONE;
4473
4474         if (!intel_irqs_enabled(dev_priv))
4475                 return IRQ_NONE;
4476
4477         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4478         disable_rpm_wakeref_asserts(dev_priv);
4479
4480         do {
4481                 u32 pipe_stats[I915_MAX_PIPES] = {};
4482                 u16 eir = 0, eir_stuck = 0;
4483                 u16 iir;
4484
4485                 iir = I915_READ16(IIR);
4486                 if (iir == 0)
4487                         break;
4488
4489                 ret = IRQ_HANDLED;
4490
4491                 /* Call regardless, as some status bits might not be
4492                  * signalled in iir */
4493                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4494
4495                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4496                         i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4497
4498                 I915_WRITE16(IIR, iir);
4499
4500                 if (iir & I915_USER_INTERRUPT)
4501                         notify_ring(dev_priv->engine[RCS]);
4502
4503                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4504                         i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4505
4506                 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4507         } while (0);
4508
4509         enable_rpm_wakeref_asserts(dev_priv);
4510
4511         return ret;
4512 }
4513
4514 static void i915_irq_reset(struct drm_device *dev)
4515 {
4516         struct drm_i915_private *dev_priv = to_i915(dev);
4517
4518         if (I915_HAS_HOTPLUG(dev_priv)) {
4519                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4520                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4521         }
4522
4523         i9xx_pipestat_irq_reset(dev_priv);
4524
4525         GEN3_IRQ_RESET();
4526 }
4527
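/*
 * Gen3 postinstall: same as gen2 plus ASLE, and, when the platform has
 * hotplug, the display port interrupt.
 */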
4528 static int i915_irq_postinstall(struct drm_device *dev)
4529 {
4530         struct drm_i915_private *dev_priv = to_i915(dev);
4531         u32 enable_mask;
4532
4533         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
4534                           I915_ERROR_MEMORY_REFRESH));
4535
4536         /* Unmask the interrupts that we always want on. */
4537         dev_priv->irq_mask =
4538                 ~(I915_ASLE_INTERRUPT |
4539                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4540                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4541                   I915_MASTER_ERROR_INTERRUPT);
4542
4543         enable_mask =
4544                 I915_ASLE_INTERRUPT |
4545                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4546                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4547                 I915_MASTER_ERROR_INTERRUPT |
4548                 I915_USER_INTERRUPT;
4549
4550         if (I915_HAS_HOTPLUG(dev_priv)) {
4551                 /* Enable in IER... */
4552                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4553                 /* and unmask in IMR */
4554                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4555         }
4556
4557         GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4558
4559         /* Interrupt setup is already guaranteed to be single-threaded; this is
4560          * just to make the assert_spin_locked check happy. */
4561         spin_lock_irq(&dev_priv->irq_lock);
4562         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4563         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4564         spin_unlock_irq(&dev_priv->irq_lock);
4565
4566         i915_enable_asle_pipestat(dev_priv);
4567
4568         return 0;
4569 }
4570
4571 static irqreturn_t i915_irq_handler(int irq, void *arg)
4572 {
4573         struct drm_device *dev = arg;
4574         struct drm_i915_private *dev_priv = to_i915(dev);
4575         irqreturn_t ret = IRQ_NONE;
4576
4577         if (!intel_irqs_enabled(dev_priv))
4578                 return IRQ_NONE;
4579
4580         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4581         disable_rpm_wakeref_asserts(dev_priv);
4582
4583         do {
4584                 u32 pipe_stats[I915_MAX_PIPES] = {};
4585                 u32 eir = 0, eir_stuck = 0;
4586                 u32 hotplug_status = 0;
4587                 u32 iir;
4588
4589                 iir = I915_READ(IIR);
4590                 if (iir == 0)
4591                         break;
4592
4593                 ret = IRQ_HANDLED;
4594
4595                 if (I915_HAS_HOTPLUG(dev_priv) &&
4596                     iir & I915_DISPLAY_PORT_INTERRUPT)
4597                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4598
4599                 /* Call regardless, as some status bits might not be
4600                  * signalled in iir */
4601                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4602
4603                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4604                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4605
4606                 I915_WRITE(IIR, iir);
4607
4608                 if (iir & I915_USER_INTERRUPT)
4609                         notify_ring(dev_priv->engine[RCS]);
4610
4611                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4612                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4613
4614                 if (hotplug_status)
4615                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4616
4617                 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4618         } while (0);
4619
4620         enable_rpm_wakeref_asserts(dev_priv);
4621
4622         return ret;
4623 }
4624
4625 static void i965_irq_reset(struct drm_device *dev)
4626 {
4627         struct drm_i915_private *dev_priv = to_i915(dev);
4628
4629         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4630         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4631
4632         i9xx_pipestat_irq_reset(dev_priv);
4633
4634         GEN3_IRQ_RESET();
4635 }
4636
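/*
 * Gen4 postinstall: G4x variants have extra page table/privilege error
 * bits and a second (BSD) ring, hence the IS_G4X() special cases below.
 */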
4637 static int i965_irq_postinstall(struct drm_device *dev)
4638 {
4639         struct drm_i915_private *dev_priv = to_i915(dev);
4640         u32 enable_mask;
4641         u32 error_mask;
4642
4643         /*
4644          * Enable some error detection, note the instruction error mask
4645          * bit is reserved, so we leave it masked.
4646          */
4647         if (IS_G4X(dev_priv)) {
4648                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4649                                GM45_ERROR_MEM_PRIV |
4650                                GM45_ERROR_CP_PRIV |
4651                                I915_ERROR_MEMORY_REFRESH);
4652         } else {
4653                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4654                                I915_ERROR_MEMORY_REFRESH);
4655         }
4656         I915_WRITE(EMR, error_mask);
4657
4658         /* Unmask the interrupts that we always want on. */
4659         dev_priv->irq_mask =
4660                 ~(I915_ASLE_INTERRUPT |
4661                   I915_DISPLAY_PORT_INTERRUPT |
4662                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4663                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4664                   I915_MASTER_ERROR_INTERRUPT);
4665
4666         enable_mask =
4667                 I915_ASLE_INTERRUPT |
4668                 I915_DISPLAY_PORT_INTERRUPT |
4669                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4670                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4671                 I915_MASTER_ERROR_INTERRUPT |
4672                 I915_USER_INTERRUPT;
4673
4674         if (IS_G4X(dev_priv))
4675                 enable_mask |= I915_BSD_USER_INTERRUPT;
4676
4677         GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
4678
4679         /* Interrupt setup is already guaranteed to be single-threaded; this is
4680          * just to make the assert_spin_locked check happy. */
4681         spin_lock_irq(&dev_priv->irq_lock);
4682         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4683         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4684         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4685         spin_unlock_irq(&dev_priv->irq_lock);
4686
4687         i915_enable_asle_pipestat(dev_priv);
4688
4689         return 0;
4690 }
4691
4692 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4693 {
4694         u32 hotplug_en;
4695
4696         lockdep_assert_held(&dev_priv->irq_lock);
4697
4698         /* Note HDMI and DP share hotplug bits */
4699         /* enable bits are the same for all generations */
4700         hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4701         /*
4702          * Programming the CRT detection parameters tends to generate a
4703          * spurious hotplug event about three seconds later. So just do it once.
4704          */
4705         if (IS_G4X(dev_priv))
4706                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4707         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4708
4709         /* Ignore TV since it's buggy */
4710         i915_hotplug_interrupt_update_locked(dev_priv,
4711                                              HOTPLUG_INT_EN_MASK |
4712                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4713                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4714                                              hotplug_en);
4715 }
4716
4717 static irqreturn_t i965_irq_handler(int irq, void *arg)
4718 {
4719         struct drm_device *dev = arg;
4720         struct drm_i915_private *dev_priv = to_i915(dev);
4721         irqreturn_t ret = IRQ_NONE;
4722
4723         if (!intel_irqs_enabled(dev_priv))
4724                 return IRQ_NONE;
4725
4726         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4727         disable_rpm_wakeref_asserts(dev_priv);
4728
4729         do {
4730                 u32 pipe_stats[I915_MAX_PIPES] = {};
4731                 u32 eir = 0, eir_stuck = 0;
4732                 u32 hotplug_status = 0;
4733                 u32 iir;
4734
4735                 iir = I915_READ(IIR);
4736                 if (iir == 0)
4737                         break;
4738
4739                 ret = IRQ_HANDLED;
4740
4741                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4742                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4743
4744                 /* Call regardless, as some status bits might not be
4745                  * signalled in iir */
4746                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4747
4748                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4749                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4750
4751                 I915_WRITE(IIR, iir);
4752
4753                 if (iir & I915_USER_INTERRUPT)
4754                         notify_ring(dev_priv->engine[RCS]);
4755
4756                 if (iir & I915_BSD_USER_INTERRUPT)
4757                         notify_ring(dev_priv->engine[VCS]);
4758
4759                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4760                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4761
4762                 if (hotplug_status)
4763                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4764
4765                 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4766         } while (0);
4767
4768         enable_rpm_wakeref_asserts(dev_priv);
4769
4770         return ret;
4771 }
4772
4773 /**
4774  * intel_irq_init - initializes irq support
4775  * @dev_priv: i915 device instance
4776  *
4777  * This function initializes all the irq support including work items, timers
4778  * and all the vtables. It does not set up the interrupt itself though.
4779  */
4780 void intel_irq_init(struct drm_i915_private *dev_priv)
4781 {
4782         struct drm_device *dev = &dev_priv->drm;
4783         struct intel_rps *rps = &dev_priv->gt_pm.rps;
4784         int i;
4785
4786         intel_hpd_init_work(dev_priv);
4787
4788         INIT_WORK(&rps->work, gen6_pm_rps_work);
4789
4790         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4791         for (i = 0; i < MAX_L3_SLICES; ++i)
4792                 dev_priv->l3_parity.remap_info[i] = NULL;
4793
4794         if (HAS_GUC_SCHED(dev_priv))
4795                 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
4796
4797         /* Let's track the enabled rps events */
4798         if (IS_VALLEYVIEW(dev_priv))
4799                 /* WaGsvRC0ResidencyMethod:vlv */
4800                 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4801         else
4802                 dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
4803                                            GEN6_PM_RP_DOWN_THRESHOLD |
4804                                            GEN6_PM_RP_DOWN_TIMEOUT);
4805
4806         rps->pm_intrmsk_mbz = 0;
4807
4808         /*
4809          * SNB, IVB and HSW can hard hang, while VLV and CHV may hard hang,
4810          * on a looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4811          *
4812          * TODO: verify if this can be reproduced on VLV,CHV.
4813          */
4814         if (INTEL_GEN(dev_priv) <= 7)
4815                 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
4816
4817         if (INTEL_GEN(dev_priv) >= 8)
4818                 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
4819
4820         if (IS_GEN(dev_priv, 2)) {
4821                 /* Gen2 doesn't have a hardware frame counter */
4822                 dev->max_vblank_count = 0;
4823         } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4824                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4825                 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4826         } else {
4827                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4828                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4829         }
4830
4831         /*
4832          * Opt out of the vblank disable timer on everything except gen2.
4833          * Gen2 doesn't have a hardware frame counter and so depends on
4834          * vblank interrupts to produce sane vblank sequence numbers.
4835          */
4836         if (!IS_GEN(dev_priv, 2))
4837                 dev->vblank_disable_immediate = true;
4838
4839         /* Most platforms treat the display irq block as an always-on
4840          * power domain. vlv/chv can disable it at runtime and need
4841          * special care to avoid writing any of the display block registers
4842          * outside of the power domain. In that case we defer display irq
4843          * setup to the runtime pm code.
4844          */
4845         dev_priv->display_irqs_enabled = true;
4846         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4847                 dev_priv->display_irqs_enabled = false;
4848
4849         dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4850         /* If we have MST support, we want to avoid doing short HPD IRQ storm
4851          * detection, as short HPD storms will occur as a natural part of
4852          * sideband messaging with MST.
4853          * On older platforms however, IRQ storms can occur with both long and
4854          * short pulses, as seen on some G4x systems.
4855          */
4856         dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
4857
4858         dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
4859         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4860
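        /*
         * Hook up the platform specific irq, vblank and hotplug vtables:
         * VLV/CHV are handled first as special cases, the remaining
         * platforms from newest to oldest.
         */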
4861         if (IS_CHERRYVIEW(dev_priv)) {
4862                 dev->driver->irq_handler = cherryview_irq_handler;
4863                 dev->driver->irq_preinstall = cherryview_irq_reset;
4864                 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4865                 dev->driver->irq_uninstall = cherryview_irq_reset;
4866                 dev->driver->enable_vblank = i965_enable_vblank;
4867                 dev->driver->disable_vblank = i965_disable_vblank;
4868                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4869         } else if (IS_VALLEYVIEW(dev_priv)) {
4870                 dev->driver->irq_handler = valleyview_irq_handler;
4871                 dev->driver->irq_preinstall = valleyview_irq_reset;
4872                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4873                 dev->driver->irq_uninstall = valleyview_irq_reset;
4874                 dev->driver->enable_vblank = i965_enable_vblank;
4875                 dev->driver->disable_vblank = i965_disable_vblank;
4876                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4877         } else if (INTEL_GEN(dev_priv) >= 11) {
4878                 dev->driver->irq_handler = gen11_irq_handler;
4879                 dev->driver->irq_preinstall = gen11_irq_reset;
4880                 dev->driver->irq_postinstall = gen11_irq_postinstall;
4881                 dev->driver->irq_uninstall = gen11_irq_reset;
4882                 dev->driver->enable_vblank = gen8_enable_vblank;
4883                 dev->driver->disable_vblank = gen8_disable_vblank;
4884                 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
4885         } else if (INTEL_GEN(dev_priv) >= 8) {
4886                 dev->driver->irq_handler = gen8_irq_handler;
4887                 dev->driver->irq_preinstall = gen8_irq_reset;
4888                 dev->driver->irq_postinstall = gen8_irq_postinstall;
4889                 dev->driver->irq_uninstall = gen8_irq_reset;
4890                 dev->driver->enable_vblank = gen8_enable_vblank;
4891                 dev->driver->disable_vblank = gen8_disable_vblank;
4892                 if (IS_GEN9_LP(dev_priv))
4893                         dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4894                 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
4895                          HAS_PCH_CNP(dev_priv))
4896                         dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4897                 else
4898                         dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4899         } else if (HAS_PCH_SPLIT(dev_priv)) {
4900                 dev->driver->irq_handler = ironlake_irq_handler;
4901                 dev->driver->irq_preinstall = ironlake_irq_reset;
4902                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4903                 dev->driver->irq_uninstall = ironlake_irq_reset;
4904                 dev->driver->enable_vblank = ironlake_enable_vblank;
4905                 dev->driver->disable_vblank = ironlake_disable_vblank;
4906                 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4907         } else {
4908                 if (IS_GEN(dev_priv, 2)) {
4909                         dev->driver->irq_preinstall = i8xx_irq_reset;
4910                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
4911                         dev->driver->irq_handler = i8xx_irq_handler;
4912                         dev->driver->irq_uninstall = i8xx_irq_reset;
4913                         dev->driver->enable_vblank = i8xx_enable_vblank;
4914                         dev->driver->disable_vblank = i8xx_disable_vblank;
4915                 } else if (IS_GEN(dev_priv, 3)) {
4916                         dev->driver->irq_preinstall = i915_irq_reset;
4917                         dev->driver->irq_postinstall = i915_irq_postinstall;
4918                         dev->driver->irq_uninstall = i915_irq_reset;
4919                         dev->driver->irq_handler = i915_irq_handler;
4920                         dev->driver->enable_vblank = i8xx_enable_vblank;
4921                         dev->driver->disable_vblank = i8xx_disable_vblank;
4922                 } else {
4923                         dev->driver->irq_preinstall = i965_irq_reset;
4924                         dev->driver->irq_postinstall = i965_irq_postinstall;
4925                         dev->driver->irq_uninstall = i965_irq_reset;
4926                         dev->driver->irq_handler = i965_irq_handler;
4927                         dev->driver->enable_vblank = i965_enable_vblank;
4928                         dev->driver->disable_vblank = i965_disable_vblank;
4929                 }
4930                 if (I915_HAS_HOTPLUG(dev_priv))
4931                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4932         }
4933 }
4934
4935 /**
4936  * intel_irq_fini - deinitializes IRQ support
4937  * @i915: i915 device instance
4938  *
4939  * This function deinitializes all the IRQ support.
4940  */
4941 void intel_irq_fini(struct drm_i915_private *i915)
4942 {
4943         int i;
4944
4945         for (i = 0; i < MAX_L3_SLICES; ++i)
4946                 kfree(i915->l3_parity.remap_info[i]);
4947 }
4948
4949 /**
4950  * intel_irq_install - enables the hardware interrupt
4951  * @dev_priv: i915 device instance
4952  *
4953  * This function enables the hardware interrupt handling, but leaves the hotplug
4954  * handling disabled. It is called after intel_irq_init().
4955  *
4956  * In the driver load and resume code we need working interrupts in a few places
4957  * but don't want to deal with the hassle of concurrent probe and hotplug
4958  * workers. Hence the split into this two-stage approach.
4959  */
4960 int intel_irq_install(struct drm_i915_private *dev_priv)
4961 {
4962         /*
4963          * We enable some interrupt sources in our postinstall hooks, so mark
4964          * interrupts as enabled _before_ actually enabling them to avoid
4965          * special cases in our ordering checks.
4966          */
4967         dev_priv->runtime_pm.irqs_enabled = true;
4968
4969         return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4970 }
4971
4972 /**
4973  * intel_irq_uninstall - finalizes all irq handling
4974  * @dev_priv: i915 device instance
4975  *
4976  * This stops interrupt and hotplug handling and unregisters and frees all
4977  * resources acquired in the init functions.
4978  */
4979 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4980 {
4981         drm_irq_uninstall(&dev_priv->drm);
4982         intel_hpd_cancel_work(dev_priv);
4983         dev_priv->runtime_pm.irqs_enabled = false;
4984 }
4985
4986 /**
4987  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4988  * @dev_priv: i915 device instance
4989  *
4990  * This function is used to disable interrupts at runtime, both in the runtime
4991  * pm and the system suspend/resume code.
4992  */
4993 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4994 {
4995         dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4996         dev_priv->runtime_pm.irqs_enabled = false;
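        /* Make sure no interrupt handler is still running on another CPU. */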
4997         synchronize_irq(dev_priv->drm.irq);
4998 }
4999
5000 /**
5001  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
5002  * @dev_priv: i915 device instance
5003  *
5004  * This function is used to enable interrupts at runtime, both in the runtime
5005  * pm and the system suspend/resume code.
5006  */
5007 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
5008 {
5009         dev_priv->runtime_pm.irqs_enabled = true;
5010         dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
5011         dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
5012 }