drm/i915: Don't enable hpd detection logic from irq_postinstall()
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/circ_buf.h>
32 #include <linux/slab.h>
33 #include <linux/sysrq.h>
34
35 #include <drm/drm_drv.h>
36 #include <drm/drm_irq.h>
37
38 #include "display/intel_display_types.h"
39 #include "display/intel_fifo_underrun.h"
40 #include "display/intel_hotplug.h"
41 #include "display/intel_lpe_audio.h"
42 #include "display/intel_psr.h"
43
44 #include "gt/intel_breadcrumbs.h"
45 #include "gt/intel_gt.h"
46 #include "gt/intel_gt_irq.h"
47 #include "gt/intel_gt_pm_irq.h"
48 #include "gt/intel_rps.h"
49
50 #include "i915_drv.h"
51 #include "i915_irq.h"
52 #include "i915_trace.h"
53 #include "intel_pm.h"
54
55 /**
56  * DOC: interrupt handling
57  *
58  * These functions provide the basic support for enabling and disabling the
59  * interrupt handling support. There's a lot more functionality in i915_irq.c
60  * and related files, but that will be described in separate chapters.
61  */
62
63 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
64
/*
 * Per-platform hotplug-detect (HPD) bit tables, indexed by enum hpd_pin.
 * Each entry holds the interrupt enable/status bit(s) for that pin in the
 * relevant hotplug register of the given platform or PCH generation.
 */
65 static const u32 hpd_ilk[HPD_NUM_PINS] = {
66         [HPD_PORT_A] = DE_DP_A_HOTPLUG,
67 };
68
69 static const u32 hpd_ivb[HPD_NUM_PINS] = {
70         [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
71 };
72
73 static const u32 hpd_bdw[HPD_NUM_PINS] = {
74         [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
75 };
76
/* South display (PCH) hotplug bits, per PCH generation. */
77 static const u32 hpd_ibx[HPD_NUM_PINS] = {
78         [HPD_CRT] = SDE_CRT_HOTPLUG,
79         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
80         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
81         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
82         [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
83 };
84
85 static const u32 hpd_cpt[HPD_NUM_PINS] = {
86         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
87         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
88         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
89         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
90         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
91 };
92
93 static const u32 hpd_spt[HPD_NUM_PINS] = {
94         [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
95         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
96         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
97         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
98         [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
99 };
100
/* GMCH platforms: enable bits (PORT_HOTPLUG_EN) and status bits. */
101 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
102         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
103         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
104         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
105         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
106         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
107         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
108 };
109
110 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
111         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
112         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
113         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
114         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
115         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
116         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
117 };
118
119 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
120         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
121         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
122         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
123         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
124         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
125         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
126 };
127
128 static const u32 hpd_bxt[HPD_NUM_PINS] = {
129         [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
130         [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
131         [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
132 };
133
/* Gen11+: each Type-C pin carries both the TC and the TBT hotplug bit. */
134 static const u32 hpd_gen11[HPD_NUM_PINS] = {
135         [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
136         [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
137         [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
138         [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
139         [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
140         [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
141 };
142
143 static const u32 hpd_icp[HPD_NUM_PINS] = {
144         [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
145         [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
146         [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
147         [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
148         [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
149         [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
150         [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
151         [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
152         [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
153 };
154
/* DG1 south display: DDI pins only, reusing the ICP DDI bit layout. */
155 static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
156         [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
157         [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
158         [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
159         [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
160 };
161
/*
 * Select the HPD bit tables matching this platform: hpd->hpd for the CPU
 * (north) display hotplug bits, hpd->pch_hpd for the PCH (south) bits.
 */
162 static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
163 {
164         struct i915_hotplug *hpd = &dev_priv->hotplug;
165
166         if (HAS_GMCH(dev_priv)) {
167                 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
168                     IS_CHERRYVIEW(dev_priv))
169                         hpd->hpd = hpd_status_g4x;
170                 else
171                         hpd->hpd = hpd_status_i915;
                /* GMCH platforms have no separate PCH hotplug block. */
172                 return;
173         }
174
175         if (INTEL_GEN(dev_priv) >= 11)
176                 hpd->hpd = hpd_gen11;
177         else if (IS_GEN9_LP(dev_priv))
178                 hpd->hpd = hpd_bxt;
179         else if (INTEL_GEN(dev_priv) >= 8)
180                 hpd->hpd = hpd_bdw;
181         else if (INTEL_GEN(dev_priv) >= 7)
182                 hpd->hpd = hpd_ivb;
183         else
184                 hpd->hpd = hpd_ilk;
185
        /*
         * No south display on pre-DG1 parts without a PCH split, or with
         * a NOP PCH: leave hpd->pch_hpd unset in that case.
         */
186         if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
187             (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
188                 return;
189
190         if (HAS_PCH_DG1(dev_priv))
191                 hpd->pch_hpd = hpd_sde_dg1;
192         else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
193                  HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
194                 hpd->pch_hpd = hpd_icp;
195         else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
196                 hpd->pch_hpd = hpd_spt;
197         else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
198                 hpd->pch_hpd = hpd_cpt;
199         else if (HAS_PCH_IBX(dev_priv))
200                 hpd->pch_hpd = hpd_ibx;
201         else
202                 MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
203 }
204
205 static void
206 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
207 {
208         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
209
210         drm_crtc_handle_vblank(&crtc->base);
211 }
212
/*
 * Quiesce a gen3+ style interrupt bank: mask everything (IMR), disable
 * delivery (IER), then ack any pending bits (IIR). Posting reads flush
 * each MMIO write before the next step.
 */
213 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
214                     i915_reg_t iir, i915_reg_t ier)
215 {
216         intel_uncore_write(uncore, imr, 0xffffffff);
217         intel_uncore_posting_read(uncore, imr);
218
219         intel_uncore_write(uncore, ier, 0);
220
221         /* IIR can theoretically queue up two events. Be paranoid. */
222         intel_uncore_write(uncore, iir, 0xffffffff);
223         intel_uncore_posting_read(uncore, iir);
224         intel_uncore_write(uncore, iir, 0xffffffff);
225         intel_uncore_posting_read(uncore, iir);
226 }
227
/* Same as gen3_irq_reset(), but for the 16-bit gen2 IMR/IER/IIR registers. */
228 void gen2_irq_reset(struct intel_uncore *uncore)
229 {
230         intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
231         intel_uncore_posting_read16(uncore, GEN2_IMR);
232
233         intel_uncore_write16(uncore, GEN2_IER, 0);
234
235         /* IIR can theoretically queue up two events. Be paranoid. */
236         intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
237         intel_uncore_posting_read16(uncore, GEN2_IIR);
238         intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
239         intel_uncore_posting_read16(uncore, GEN2_IIR);
240 }
241
242 /*
243  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
244  */
245 static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
246 {
247         u32 val = intel_uncore_read(uncore, reg);
248
249         if (val == 0)
250                 return;
251
        /* Unexpected pending interrupts: warn once, then ack them. */
252         drm_WARN(&uncore->i915->drm, 1,
253                  "Interrupt register 0x%x is not zero: 0x%08x\n",
254                  i915_mmio_reg_offset(reg), val);
        /* Write/ack twice, IIR can queue up two events (see gen3_irq_reset()). */
255         intel_uncore_write(uncore, reg, 0xffffffff);
256         intel_uncore_posting_read(uncore, reg);
257         intel_uncore_write(uncore, reg, 0xffffffff);
258         intel_uncore_posting_read(uncore, reg);
259 }
260
/* 16-bit gen2 variant of gen3_assert_iir_is_zero(). */
261 static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
262 {
263         u16 val = intel_uncore_read16(uncore, GEN2_IIR);
264
265         if (val == 0)
266                 return;
267
268         drm_WARN(&uncore->i915->drm,
269                  "Interrupt register 0x%x is not zero: 0x%08x\n",
270                  i915_mmio_reg_offset(GEN2_IIR), val);
        /* Ack twice, IIR can queue up two events. */
271         intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
272         intel_uncore_posting_read16(uncore, GEN2_IIR);
273         intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
274         intel_uncore_posting_read16(uncore, GEN2_IIR);
275 }
276
/*
 * Arm a gen3+ style interrupt bank. IIR must already be clear (the reset
 * paths guarantee that; we warn and ack if it is not), then program IER
 * before unmasking via IMR; the posting read flushes the IMR write.
 */
277 void gen3_irq_init(struct intel_uncore *uncore,
278                    i915_reg_t imr, u32 imr_val,
279                    i915_reg_t ier, u32 ier_val,
280                    i915_reg_t iir)
281 {
282         gen3_assert_iir_is_zero(uncore, iir);
283
284         intel_uncore_write(uncore, ier, ier_val);
285         intel_uncore_write(uncore, imr, imr_val);
286         intel_uncore_posting_read(uncore, imr);
287 }
288
/* 16-bit gen2 variant of gen3_irq_init(); registers are fixed (GEN2_*). */
289 void gen2_irq_init(struct intel_uncore *uncore,
290                    u32 imr_val, u32 ier_val)
291 {
292         gen2_assert_iir_is_zero(uncore);
293
294         intel_uncore_write16(uncore, GEN2_IER, ier_val);
295         intel_uncore_write16(uncore, GEN2_IMR, imr_val);
296         intel_uncore_posting_read16(uncore, GEN2_IMR);
297 }
298
299 /* For display hotplug interrupt */
/*
 * RMW update of PORT_HOTPLUG_EN: clear @mask, then set @bits (@bits must
 * be a subset of @mask). Caller must hold dev_priv->irq_lock; see
 * i915_hotplug_interrupt_update() for the self-locking wrapper.
 */
300 static inline void
301 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
302                                      u32 mask,
303                                      u32 bits)
304 {
305         u32 val;
306
307         lockdep_assert_held(&dev_priv->irq_lock);
308         drm_WARN_ON(&dev_priv->drm, bits & ~mask);
309
310         val = I915_READ(PORT_HOTPLUG_EN);
311         val &= ~mask;
312         val |= bits;
313         I915_WRITE(PORT_HOTPLUG_EN, val);
314 }
315
316 /**
317  * i915_hotplug_interrupt_update - update hotplug interrupt enable
318  * @dev_priv: driver private
319  * @mask: bits to update
320  * @bits: bits to enable
321  * NOTE: the HPD enable bits are modified both inside and outside
322  * of an interrupt context. To avoid that read-modify-write cycles
323  * interfere, these bits are protected by a spinlock. Since this
324  * function is usually not called from a context where the lock is
325  * held already, this function acquires the lock itself. A non-locking
326  * version is also available.
327  */
328 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
329                                    u32 mask,
330                                    u32 bits)
331 {
332         spin_lock_irq(&dev_priv->irq_lock);
333         i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
334         spin_unlock_irq(&dev_priv->irq_lock);
335 }
336
337 /**
338  * ilk_update_display_irq - update DEIMR
339  * @dev_priv: driver private
340  * @interrupt_mask: mask of interrupt bits to update
341  * @enabled_irq_mask: mask of interrupt bits to enable
342  */
343 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
344                             u32 interrupt_mask,
345                             u32 enabled_irq_mask)
346 {
347         u32 new_val;
348
349         lockdep_assert_held(&dev_priv->irq_lock);
350         drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
351
        /*
         * DEIMR is a mask register: a set bit disables the interrupt, so
         * bits in @interrupt_mask that are NOT in @enabled_irq_mask end
         * up set (masked) in the cached irq_mask.
         */
352         new_val = dev_priv->irq_mask;
353         new_val &= ~interrupt_mask;
354         new_val |= (~enabled_irq_mask & interrupt_mask);
355
        /* Skip the MMIO write if nothing changed or irqs are torn down. */
356         if (new_val != dev_priv->irq_mask &&
357             !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
358                 dev_priv->irq_mask = new_val;
359                 I915_WRITE(DEIMR, dev_priv->irq_mask);
360                 POSTING_READ(DEIMR);
361         }
362 }
363
364 /**
365  * bdw_update_port_irq - update DE port interrupt
366  * @dev_priv: driver private
367  * @interrupt_mask: mask of interrupt bits to update
368  * @enabled_irq_mask: mask of interrupt bits to enable
369  */
370 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
371                                 u32 interrupt_mask,
372                                 u32 enabled_irq_mask)
373 {
374         u32 new_val;
375         u32 old_val;
376
377         lockdep_assert_held(&dev_priv->irq_lock);
378
379         drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
380
381         if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
382                 return;
383
        /* No cached copy of this IMR exists; read-modify-write the register. */
384         old_val = I915_READ(GEN8_DE_PORT_IMR);
385
        /* IMR semantics: set bit = masked, so disabled irqs become set bits. */
386         new_val = old_val;
387         new_val &= ~interrupt_mask;
388         new_val |= (~enabled_irq_mask & interrupt_mask);
389
390         if (new_val != old_val) {
391                 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
392                 POSTING_READ(GEN8_DE_PORT_IMR);
393         }
394 }
395
396 /**
397  * bdw_update_pipe_irq - update DE pipe interrupt
398  * @dev_priv: driver private
399  * @pipe: pipe whose interrupt to update
400  * @interrupt_mask: mask of interrupt bits to update
401  * @enabled_irq_mask: mask of interrupt bits to enable
402  */
403 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
404                          enum pipe pipe,
405                          u32 interrupt_mask,
406                          u32 enabled_irq_mask)
407 {
408         u32 new_val;
409
410         lockdep_assert_held(&dev_priv->irq_lock);
411
412         drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
413
414         if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
415                 return;
416
        /* Update the cached per-pipe IMR shadow; set bit = masked. */
417         new_val = dev_priv->de_irq_mask[pipe];
418         new_val &= ~interrupt_mask;
419         new_val |= (~enabled_irq_mask & interrupt_mask);
420
421         if (new_val != dev_priv->de_irq_mask[pipe]) {
422                 dev_priv->de_irq_mask[pipe] = new_val;
423                 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
424                 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
425         }
426 }
427
428 /**
429  * ibx_display_interrupt_update - update SDEIMR
430  * @dev_priv: driver private
431  * @interrupt_mask: mask of interrupt bits to update
432  * @enabled_irq_mask: mask of interrupt bits to enable
433  */
434 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
435                                   u32 interrupt_mask,
436                                   u32 enabled_irq_mask)
437 {
        /* RMW of the south display IMR; set bit = interrupt masked. */
438         u32 sdeimr = I915_READ(SDEIMR);
439         sdeimr &= ~interrupt_mask;
440         sdeimr |= (~enabled_irq_mask & interrupt_mask);
441
442         drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
443
444         lockdep_assert_held(&dev_priv->irq_lock);
445
446         if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
447                 return;
448
449         I915_WRITE(SDEIMR, sdeimr);
450         POSTING_READ(SDEIMR);
451 }
452
/*
 * Compute the PIPESTAT enable-bit mask corresponding to the currently
 * cached status bits for @pipe. On most bits the enable bit is simply the
 * status bit shifted into the high 16 bits; the exceptions are handled
 * explicitly below. Caller must hold dev_priv->irq_lock.
 */
453 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
454                               enum pipe pipe)
455 {
456         u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
457         u32 enable_mask = status_mask << 16;
458
459         lockdep_assert_held(&dev_priv->irq_lock);
460
461         if (INTEL_GEN(dev_priv) < 5)
462                 goto out;
463
464         /*
465          * On pipe A we don't support the PSR interrupt yet,
466          * on pipe B and C the same bit MBZ.
467          */
468         if (drm_WARN_ON_ONCE(&dev_priv->drm,
469                              status_mask & PIPE_A_PSR_STATUS_VLV))
470                 return 0;
471         /*
472          * On pipe B and C we don't support the PSR interrupt yet, on pipe
473          * A the same bit is for perf counters which we don't use either.
474          */
475         if (drm_WARN_ON_ONCE(&dev_priv->drm,
476                              status_mask & PIPE_B_PSR_STATUS_VLV))
477                 return 0;
478
        /* These enable bits do not follow the status<<16 rule. */
479         enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
480                          SPRITE0_FLIP_DONE_INT_EN_VLV |
481                          SPRITE1_FLIP_DONE_INT_EN_VLV);
482         if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
483                 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
484         if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
485                 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
486
487 out:
488         drm_WARN_ONCE(&dev_priv->drm,
489                       enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
490                       status_mask & ~PIPESTAT_INT_STATUS_MASK,
491                       "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
492                       pipe_name(pipe), enable_mask, status_mask);
493
494         return enable_mask;
495 }
496
/*
 * Enable the given PIPESTAT status interrupt bits on @pipe. Updates the
 * cached pipestat_irq_mask and programs PIPESTAT (writing a status bit
 * also acks any pending event). Caller must hold dev_priv->irq_lock.
 */
497 void i915_enable_pipestat(struct drm_i915_private *dev_priv,
498                           enum pipe pipe, u32 status_mask)
499 {
500         i915_reg_t reg = PIPESTAT(pipe);
501         u32 enable_mask;
502
503         drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
504                       "pipe %c: status_mask=0x%x\n",
505                       pipe_name(pipe), status_mask);
506
507         lockdep_assert_held(&dev_priv->irq_lock);
508         drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
509
        /* Nothing to do if all requested bits are already enabled. */
510         if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
511                 return;
512
513         dev_priv->pipestat_irq_mask[pipe] |= status_mask;
514         enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
515
516         I915_WRITE(reg, enable_mask | status_mask);
517         POSTING_READ(reg);
518 }
519
/*
 * Disable the given PIPESTAT status interrupt bits on @pipe; mirror image
 * of i915_enable_pipestat(). Caller must hold dev_priv->irq_lock.
 */
520 void i915_disable_pipestat(struct drm_i915_private *dev_priv,
521                            enum pipe pipe, u32 status_mask)
522 {
523         i915_reg_t reg = PIPESTAT(pipe);
524         u32 enable_mask;
525
526         drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
527                       "pipe %c: status_mask=0x%x\n",
528                       pipe_name(pipe), status_mask);
529
530         lockdep_assert_held(&dev_priv->irq_lock);
531         drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
532
        /* Nothing to do if none of the requested bits are enabled. */
533         if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
534                 return;
535
536         dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
537         enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
538
        /* Writing the status bits also acks any still-pending events. */
539         I915_WRITE(reg, enable_mask | status_mask);
540         POSTING_READ(reg);
541 }
542
543 static bool i915_has_asle(struct drm_i915_private *dev_priv)
544 {
545         if (!dev_priv->opregion.asle)
546                 return false;
547
548         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
549 }
550
551 /**
552  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
553  * @dev_priv: i915 device private
554  */
555 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
556 {
557         if (!i915_has_asle(dev_priv))
558                 return;
559
560         spin_lock_irq(&dev_priv->irq_lock);
561
        /* Legacy backlight events arrive on pipe B; gen4+ also on pipe A. */
562         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
563         if (INTEL_GEN(dev_priv) >= 4)
564                 i915_enable_pipestat(dev_priv, PIPE_A,
565                                      PIPE_LEGACY_BLC_EVENT_STATUS);
566
567         spin_unlock_irq(&dev_priv->irq_lock);
568 }
569
570 /*
571  * This timing diagram depicts the video signal in and
572  * around the vertical blanking period.
573  *
574  * Assumptions about the fictitious mode used in this example:
575  *  vblank_start >= 3
576  *  vsync_start = vblank_start + 1
577  *  vsync_end = vblank_start + 2
578  *  vtotal = vblank_start + 3
579  *
580  *           start of vblank:
581  *           latch double buffered registers
582  *           increment frame counter (ctg+)
583  *           generate start of vblank interrupt (gen4+)
584  *           |
585  *           |          frame start:
586  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
587  *           |          may be shifted forward 1-3 extra lines via PIPECONF
588  *           |          |
589  *           |          |  start of vsync:
590  *           |          |  generate vsync interrupt
591  *           |          |  |
592  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
593  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
594  * ----va---> <-----------------vb--------------------> <--------va-------------
595  *       |          |       <----vs----->                     |
596  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
597  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
598  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
599  *       |          |                                         |
600  *       last visible pixel                                   first visible pixel
601  *                  |                                         increment frame counter (gen3/4)
602  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
603  *
604  * x  = horizontal active
605  * _  = horizontal blanking
606  * hs = horizontal sync
607  * va = vertical active
608  * vb = vertical blanking
609  * vs = vertical sync
610  * vbs = vblank_start (number)
611  *
612  * Summary:
613  * - most events happen at the start of horizontal sync
614  * - frame start happens at the start of horizontal blank, 1-4 lines
615  *   (depending on PIPECONF settings) after the start of vblank
616  * - gen3/4 pixel and frame counter are synchronized with the start
617  *   of horizontal active on the first line of vertical active
618  */
619
620 /* Called from drm generic code, passed a 'crtc', which
621  * we use as a pipe index
622  */
/*
 * Cook up a vblank counter from the gen3/4 frame counter, which increments
 * at start of active rather than start of vblank: read the frame/pixel
 * counter pair and bump the count by one once the pixel counter has passed
 * vblank start. Returns a 24-bit counter value.
 */
623 u32 i915_get_vblank_counter(struct drm_crtc *crtc)
624 {
625         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
626         struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
627         const struct drm_display_mode *mode = &vblank->hwmode;
628         enum pipe pipe = to_intel_crtc(crtc)->pipe;
629         i915_reg_t high_frame, low_frame;
630         u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
631         unsigned long irqflags;
632
633         /*
634          * On i965gm TV output the frame counter only works up to
635          * the point when we enable the TV encoder. After that the
636          * frame counter ceases to work and reads zero. We need a
637          * vblank wait before enabling the TV encoder and so we
638          * have to enable vblank interrupts while the frame counter
639          * is still in a working state. However the core vblank code
640          * does not like us returning non-zero frame counter values
641          * when we've told it that we don't have a working frame
642          * counter. Thus we must stop non-zero values leaking out.
643          */
644         if (!vblank->max_vblank_count)
645                 return 0;
646
647         htotal = mode->crtc_htotal;
648         hsync_start = mode->crtc_hsync_start;
649         vbl_start = mode->crtc_vblank_start;
650         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
651                 vbl_start = DIV_ROUND_UP(vbl_start, 2);
652
653         /* Convert to pixel count */
654         vbl_start *= htotal;
655
656         /* Start of vblank event occurs at start of hsync */
657         vbl_start -= htotal - hsync_start;
658
659         high_frame = PIPEFRAME(pipe);
660         low_frame = PIPEFRAMEPIXEL(pipe);
661
662         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
663
664         /*
665          * High & low register fields aren't synchronized, so make sure
666          * we get a low value that's stable across two reads of the high
667          * register.
668          */
669         do {
670                 high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
671                 low   = intel_de_read_fw(dev_priv, low_frame);
672                 high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
673         } while (high1 != high2);
674
675         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
676
        /* Split the low register into its frame-low and pixel-count fields. */
677         high1 >>= PIPE_FRAME_HIGH_SHIFT;
678         pixel = low & PIPE_PIXEL_MASK;
679         low >>= PIPE_FRAME_LOW_SHIFT;
680
681         /*
682          * The frame counter increments at beginning of active.
683          * Cook up a vblank counter by also checking the pixel
684          * counter against vblank start.
685          */
686         return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
687 }
688
689 u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
690 {
691         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
692         struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
693         enum pipe pipe = to_intel_crtc(crtc)->pipe;
694
695         if (!vblank->max_vblank_count)
696                 return 0;
697
698         return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
699 }
700
701 /*
702  * On certain encoders on certain platforms, pipe
703  * scanline register will not work to get the scanline,
704  * since the timings are driven from the PORT or issues
705  * with scanline register updates.
706  * This function will use Framestamp and current
707  * timestamp registers to calculate the scanline.
708  */
709 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
710 {
711         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
712         struct drm_vblank_crtc *vblank =
713                 &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
714         const struct drm_display_mode *mode = &vblank->hwmode;
715         u32 vblank_start = mode->crtc_vblank_start;
716         u32 vtotal = mode->crtc_vtotal;
717         u32 htotal = mode->crtc_htotal;
718         u32 clock = mode->crtc_clock;
719         u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
720
721         /*
722          * To avoid the race condition where we might cross into the
723          * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
724          * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
725          * during the same frame.
726          */
727         do {
728                 /*
729                  * This field provides read back of the display
730                  * pipe frame time stamp. The time stamp value
731                  * is sampled at every start of vertical blank.
732                  */
733                 scan_prev_time = intel_de_read_fw(dev_priv,
734                                                   PIPE_FRMTMSTMP(crtc->pipe));
735
736                 /*
737                  * The TIMESTAMP_CTR register has the current
738                  * time stamp value.
739                  */
740                 scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
741
742                 scan_post_time = intel_de_read_fw(dev_priv,
743                                                   PIPE_FRMTMSTMP(crtc->pipe));
744         } while (scan_post_time != scan_prev_time);
745
        /*
         * Elapsed time since start of vblank, in timestamp units, converted
         * to scanlines (clock is in kHz, hence the 1000 factor), then
         * rebased so 0 corresponds to start of active.
         */
746         scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
747                                         clock), 1000 * htotal);
748         scanline = min(scanline, vtotal - 1);
749         scanline = (scanline + vblank_start) % vtotal;
750
751         return scanline;
752 }
753
754 /*
755  * intel_de_read_fw(), only for fast reads of display block, no need for
756  * forcewake etc.
757  */
/*
 * Return the current scanline for @crtc (0 = first line of active), or -1
 * if the crtc is inactive. Reads PIPEDSL, with a timestamp-based fallback
 * for modes where the scanline register is unreliable.
 */
758 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
759 {
760         struct drm_device *dev = crtc->base.dev;
761         struct drm_i915_private *dev_priv = to_i915(dev);
762         const struct drm_display_mode *mode;
763         struct drm_vblank_crtc *vblank;
764         enum pipe pipe = crtc->pipe;
765         int position, vtotal;
766
767         if (!crtc->active)
768                 return -1;
769
770         vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
771         mode = &vblank->hwmode;
772
773         if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
774                 return __intel_get_crtc_scanline_from_timestamp(crtc);
775
776         vtotal = mode->crtc_vtotal;
777         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
778                 vtotal /= 2;
779
780         if (IS_GEN(dev_priv, 2))
781                 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
782         else
783                 position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
784
785         /*
786          * On HSW, the DSL reg (0x70000) appears to return 0 if we
787          * read it just before the start of vblank.  So try it again
788          * so we don't accidentally end up spanning a vblank frame
789          * increment, causing the pipe_update_end() code to squak at us.
790          *
791          * The nature of this problem means we can't simply check the ISR
792          * bit and return the vblank start value; nor can we use the scanline
793          * debug register in the transcoder as it appears to have the same
794          * problem.  We may need to extend this to include other platforms,
795          * but so far testing only shows the problem on HSW.
796          */
797         if (HAS_DDI(dev_priv) && !position) {
798                 int i, temp;
799
800                 for (i = 0; i < 100; i++) {
801                         udelay(1);
802                         temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
803                         if (temp != position) {
804                                 position = temp;
805                                 break;
806                         }
807                 }
808         }
809
810         /*
811          * See update_scanline_offset() for the details on the
812          * scanline_offset adjustment.
813          */
814         return (position + crtc->scanline_offset) % vtotal;
815 }
816
/*
 * Query the current scanout position of @_crtc.
 *
 * Splits the position into *vpos/*hpos, optionally sampling system
 * timestamps immediately before (*stime) and after (*etime) the
 * hardware query.  While in vblank, *vpos is negative, counting up
 * towards 0 at vbl_end; outside vblank it counts up from vbl_end.
 *
 * Returns false if the pipe is disabled (mode->crtc_clock == 0),
 * true otherwise.
 */
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
                                     bool in_vblank_irq,
                                     int *vpos, int *hpos,
                                     ktime_t *stime, ktime_t *etime,
                                     const struct drm_display_mode *mode)
{
        struct drm_device *dev = _crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(_crtc);
        enum pipe pipe = crtc->pipe;
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        unsigned long irqflags;
        /*
         * gen5+, g4x and gen2 read the scanline counter directly; the
         * remaining platforms (gen3/4 minus g4x) use the pixel counter
         * below, unless the mode explicitly requests the scanline counter.
         */
        bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
                IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
                crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

        if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
                drm_dbg(&dev_priv->drm,
                        "trying to get scanoutpos for disabled "
                        "pipe %c\n", pipe_name(pipe));
                return false;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        /* Interlaced timings are per frame; convert to per-field values. */
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (use_scanline_counter) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (use_scanline_counter) {
                *vpos = position;
                *hpos = 0;
        } else {
                /* Split the pixel count back into line and pixel-in-line. */
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        return true;
}
936
937 bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
938                                      ktime_t *vblank_time, bool in_vblank_irq)
939 {
940         return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
941                 crtc, max_error, vblank_time, in_vblank_irq,
942                 i915_get_crtc_scanoutpos);
943 }
944
945 int intel_get_crtc_scanline(struct intel_crtc *crtc)
946 {
947         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
948         unsigned long irqflags;
949         int position;
950
951         spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
952         position = __intel_get_crtc_scanline(crtc);
953         spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
954
955         return position;
956 }
957
/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        struct intel_gt *gt = &dev_priv->gt;
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        u32 misccpctl;
        u8 slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
                goto out;

        /* Disable DOP clock gating, preserving the previous value. */
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        /* Process every slice with a pending parity error. */
        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                /* ffs() is 1-based; convert to a 0-based slice index. */
                slice--;
                if (drm_WARN_ON_ONCE(&dev_priv->drm,
                                     slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1(slice);

                /* Decode the faulting row/bank/subbank from the error reg. */
                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                /* Ack the error and re-arm error reporting for this slice. */
                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                /* Notify userspace via a uevent so it can remap the row. */
                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        /* Restore DOP clock gating to its previous state. */
        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
        /* Re-enable the parity error interrupt now that it's been handled. */
        spin_lock_irq(&gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(&gt->irq_lock);

        mutex_unlock(&dev_priv->drm.struct_mutex);
}
1040
1041 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1042 {
1043         switch (pin) {
1044         case HPD_PORT_TC1:
1045                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1);
1046         case HPD_PORT_TC2:
1047                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2);
1048         case HPD_PORT_TC3:
1049                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3);
1050         case HPD_PORT_TC4:
1051                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4);
1052         case HPD_PORT_TC5:
1053                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5);
1054         case HPD_PORT_TC6:
1055                 return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6);
1056         default:
1057                 return false;
1058         }
1059 }
1060
1061 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1062 {
1063         switch (pin) {
1064         case HPD_PORT_A:
1065                 return val & PORTA_HOTPLUG_LONG_DETECT;
1066         case HPD_PORT_B:
1067                 return val & PORTB_HOTPLUG_LONG_DETECT;
1068         case HPD_PORT_C:
1069                 return val & PORTC_HOTPLUG_LONG_DETECT;
1070         default:
1071                 return false;
1072         }
1073 }
1074
1075 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1076 {
1077         switch (pin) {
1078         case HPD_PORT_A:
1079                 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A);
1080         case HPD_PORT_B:
1081                 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B);
1082         case HPD_PORT_C:
1083                 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C);
1084         case HPD_PORT_D:
1085                 return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D);
1086         default:
1087                 return false;
1088         }
1089 }
1090
1091 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1092 {
1093         switch (pin) {
1094         case HPD_PORT_TC1:
1095                 return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1);
1096         case HPD_PORT_TC2:
1097                 return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2);
1098         case HPD_PORT_TC3:
1099                 return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3);
1100         case HPD_PORT_TC4:
1101                 return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4);
1102         case HPD_PORT_TC5:
1103                 return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5);
1104         case HPD_PORT_TC6:
1105                 return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6);
1106         default:
1107                 return false;
1108         }
1109 }
1110
1111 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1112 {
1113         switch (pin) {
1114         case HPD_PORT_E:
1115                 return val & PORTE_HOTPLUG_LONG_DETECT;
1116         default:
1117                 return false;
1118         }
1119 }
1120
1121 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1122 {
1123         switch (pin) {
1124         case HPD_PORT_A:
1125                 return val & PORTA_HOTPLUG_LONG_DETECT;
1126         case HPD_PORT_B:
1127                 return val & PORTB_HOTPLUG_LONG_DETECT;
1128         case HPD_PORT_C:
1129                 return val & PORTC_HOTPLUG_LONG_DETECT;
1130         case HPD_PORT_D:
1131                 return val & PORTD_HOTPLUG_LONG_DETECT;
1132         default:
1133                 return false;
1134         }
1135 }
1136
1137 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1138 {
1139         switch (pin) {
1140         case HPD_PORT_A:
1141                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1142         default:
1143                 return false;
1144         }
1145 }
1146
1147 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1148 {
1149         switch (pin) {
1150         case HPD_PORT_B:
1151                 return val & PORTB_HOTPLUG_LONG_DETECT;
1152         case HPD_PORT_C:
1153                 return val & PORTC_HOTPLUG_LONG_DETECT;
1154         case HPD_PORT_D:
1155                 return val & PORTD_HOTPLUG_LONG_DETECT;
1156         default:
1157                 return false;
1158         }
1159 }
1160
1161 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1162 {
1163         switch (pin) {
1164         case HPD_PORT_B:
1165                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1166         case HPD_PORT_C:
1167                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1168         case HPD_PORT_D:
1169                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1170         default:
1171                 return false;
1172         }
1173 }
1174
1175 /*
1176  * Get a bit mask of pins that have triggered, and which ones may be long.
1177  * This can be called multiple times with the same masks to accumulate
1178  * hotplug detection results from several registers.
1179  *
1180  * Note that the caller is expected to zero out the masks initially.
1181  */
1182 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1183                                u32 *pin_mask, u32 *long_mask,
1184                                u32 hotplug_trigger, u32 dig_hotplug_reg,
1185                                const u32 hpd[HPD_NUM_PINS],
1186                                bool long_pulse_detect(enum hpd_pin pin, u32 val))
1187 {
1188         enum hpd_pin pin;
1189
1190         BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1191
1192         for_each_hpd_pin(pin) {
1193                 if ((hpd[pin] & hotplug_trigger) == 0)
1194                         continue;
1195
1196                 *pin_mask |= BIT(pin);
1197
1198                 if (long_pulse_detect(pin, dig_hotplug_reg))
1199                         *long_mask |= BIT(pin);
1200         }
1201
1202         drm_dbg(&dev_priv->drm,
1203                 "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1204                 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1205
1206 }
1207
1208 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
1209                                   const u32 hpd[HPD_NUM_PINS])
1210 {
1211         struct intel_encoder *encoder;
1212         u32 enabled_irqs = 0;
1213
1214         for_each_intel_encoder(&dev_priv->drm, encoder)
1215                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
1216                         enabled_irqs |= hpd[encoder->hpd_pin];
1217
1218         return enabled_irqs;
1219 }
1220
1221 static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
1222                                   const u32 hpd[HPD_NUM_PINS])
1223 {
1224         struct intel_encoder *encoder;
1225         u32 hotplug_irqs = 0;
1226
1227         for_each_intel_encoder(&dev_priv->drm, encoder)
1228                 hotplug_irqs |= hpd[encoder->hpd_pin];
1229
1230         return hotplug_irqs;
1231 }
1232
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
        /* Wake all waiters on the GMBUS wait queue. */
        wake_up_all(&dev_priv->gmbus_wait_queue);
}
1237
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
        /*
         * DP AUX completions deliberately share the GMBUS wait queue
         * (same wake as gmbus_irq_handler()) -- wake all waiters.
         */
        wake_up_all(&dev_priv->gmbus_wait_queue);
}
1242
#if defined(CONFIG_DEBUG_FS)
/*
 * Record a new CRC result for @pipe via the DRM CRC entry mechanism.
 * The first result (and on gen8+ also the second) after enabling is
 * discarded -- see the comment below.
 */
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                         enum pipe pipe,
                                         u32 crc0, u32 crc1,
                                         u32 crc2, u32 crc3,
                                         u32 crc4)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
        u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

        trace_intel_pipe_crc(crtc, crcs);

        spin_lock(&pipe_crc->lock);
        /*
         * For some not yet identified reason, the first CRC is
         * bonkers. So let's just wait for the next vblank and read
         * out the buggy result.
         *
         * On GEN8+ sometimes the second CRC is bonkers as well, so
         * don't trust that one either.
         */
        if (pipe_crc->skipped <= 0 ||
            (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
                pipe_crc->skipped++;
                spin_unlock(&pipe_crc->lock);
                return;
        }
        spin_unlock(&pipe_crc->lock);

        drm_crtc_add_crc_entry(&crtc->base, true,
                                drm_crtc_accurate_vblank_count(&crtc->base),
                                crcs);
}
#else
/* No-op stub when CONFIG_DEBUG_FS is not built in. */
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                             enum pipe pipe,
                             u32 crc0, u32 crc1,
                             u32 crc2, u32 crc3,
                             u32 crc4) {}
#endif
1285
1286 static void flip_done_handler(struct drm_i915_private *i915,
1287                               enum pipe pipe)
1288 {
1289         struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
1290         struct drm_crtc_state *crtc_state = crtc->base.state;
1291         struct drm_pending_vblank_event *e = crtc_state->event;
1292         struct drm_device *dev = &i915->drm;
1293         unsigned long irqflags;
1294
1295         spin_lock_irqsave(&dev->event_lock, irqflags);
1296
1297         crtc_state->event = NULL;
1298
1299         drm_crtc_send_vblank_event(&crtc->base, e);
1300
1301         spin_unlock_irqrestore(&dev->event_lock, irqflags);
1302 }
1303
1304 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1305                                      enum pipe pipe)
1306 {
1307         display_pipe_crc_irq_handler(dev_priv, pipe,
1308                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1309                                      0, 0, 0, 0);
1310 }
1311
1312 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1313                                      enum pipe pipe)
1314 {
1315         display_pipe_crc_irq_handler(dev_priv, pipe,
1316                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1317                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1318                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1319                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1320                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1321 }
1322
1323 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1324                                       enum pipe pipe)
1325 {
1326         u32 res1, res2;
1327
1328         if (INTEL_GEN(dev_priv) >= 3)
1329                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1330         else
1331                 res1 = 0;
1332
1333         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1334                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1335         else
1336                 res2 = 0;
1337
1338         display_pipe_crc_irq_handler(dev_priv, pipe,
1339                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1340                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1341                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1342                                      res1, res2);
1343 }
1344
1345 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1346 {
1347         enum pipe pipe;
1348
1349         for_each_pipe(dev_priv, pipe) {
1350                 I915_WRITE(PIPESTAT(pipe),
1351                            PIPESTAT_INT_STATUS_MASK |
1352                            PIPE_FIFO_UNDERRUN_STATUS);
1353
1354                 dev_priv->pipestat_irq_mask[pipe] = 0;
1355         }
1356 }
1357
/*
 * Read and ack the PIPESTAT registers for the pipes flagged in @iir,
 * storing the acked status bits in @pipe_stats[] for later handling.
 * Runs under irq_lock; bails out early if display IRQs are disabled.
 */
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
                                  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
        enum pipe pipe;

        spin_lock(&dev_priv->irq_lock);

        if (!dev_priv->display_irqs_enabled) {
                spin_unlock(&dev_priv->irq_lock);
                return;
        }

        for_each_pipe(dev_priv, pipe) {
                i915_reg_t reg;
                u32 status_mask, enable_mask, iir_bit = 0;

                /*
                 * PIPESTAT bits get signalled even when the interrupt is
                 * disabled with the mask bits, and some of the status bits do
                 * not generate interrupts at all (like the underrun bit). Hence
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */

                /* fifo underruns are filterered in the underrun handler. */
                status_mask = PIPE_FIFO_UNDERRUN_STATUS;

                /* Map the pipe onto its IIR event bit. */
                switch (pipe) {
                default:
                case PIPE_A:
                        iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                        break;
                case PIPE_B:
                        iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                        break;
                case PIPE_C:
                        iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
                        break;
                }
                /* Only pick up the enabled status bits if the pipe fired. */
                if (iir & iir_bit)
                        status_mask |= dev_priv->pipestat_irq_mask[pipe];

                if (!status_mask)
                        continue;

                reg = PIPESTAT(pipe);
                pipe_stats[pipe] = I915_READ(reg) & status_mask;
                enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

                /*
                 * Clear the PIPE*STAT regs before the IIR
                 *
                 * Toggle the enable bits to make sure we get an
                 * edge in the ISR pipe event bit if we don't clear
                 * all the enabled status bits. Otherwise the edge
                 * triggered IIR on i965/g4x wouldn't notice that
                 * an interrupt is still pending.
                 */
                if (pipe_stats[pipe]) {
                        I915_WRITE(reg, pipe_stats[pipe]);
                        I915_WRITE(reg, enable_mask);
                }
        }
        spin_unlock(&dev_priv->irq_lock);
}
1423
1424 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1425                                       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1426 {
1427         enum pipe pipe;
1428
1429         for_each_pipe(dev_priv, pipe) {
1430                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1431                         intel_handle_vblank(dev_priv, pipe);
1432
1433                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1434                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1435
1436                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1437                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1438         }
1439 }
1440
1441 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1442                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1443 {
1444         bool blc_event = false;
1445         enum pipe pipe;
1446
1447         for_each_pipe(dev_priv, pipe) {
1448                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1449                         intel_handle_vblank(dev_priv, pipe);
1450
1451                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1452                         blc_event = true;
1453
1454                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1455                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1456
1457                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1458                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1459         }
1460
1461         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1462                 intel_opregion_asle_intr(dev_priv);
1463 }
1464
1465 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1466                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1467 {
1468         bool blc_event = false;
1469         enum pipe pipe;
1470
1471         for_each_pipe(dev_priv, pipe) {
1472                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1473                         intel_handle_vblank(dev_priv, pipe);
1474
1475                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1476                         blc_event = true;
1477
1478                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1479                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1480
1481                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1482                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1483         }
1484
1485         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1486                 intel_opregion_asle_intr(dev_priv);
1487
1488         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1489                 gmbus_irq_handler(dev_priv);
1490 }
1491
1492 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1493                                             u32 pipe_stats[I915_MAX_PIPES])
1494 {
1495         enum pipe pipe;
1496
1497         for_each_pipe(dev_priv, pipe) {
1498                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1499                         intel_handle_vblank(dev_priv, pipe);
1500
1501                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1502                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1503
1504                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1505                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1506         }
1507
1508         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1509                 gmbus_irq_handler(dev_priv);
1510 }
1511
/*
 * Read and clear PORT_HOTPLUG_STAT, returning the accumulated hotplug
 * status bits seen.  Loops because acking can race with new events
 * arriving; warns if the register refuses to clear after 10 tries.
 */
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
        u32 hotplug_status = 0, hotplug_status_mask;
        int i;

        /* g4x/vlv/chv report DP AUX alongside the hotplug bits. */
        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
                        DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
        else
                hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

        /*
         * We absolutely have to clear all the pending interrupt
         * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
         * interrupt bit won't have an edge, and the i965/g4x
         * edge triggered IIR will not notice that an interrupt
         * is still pending. We can't use PORT_HOTPLUG_EN to
         * guarantee the edge as the act of toggling the enable
         * bits can itself generate a new hotplug interrupt :(
         */
        for (i = 0; i < 10; i++) {
                u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

                /* Fully acked -- return what we've gathered so far. */
                if (tmp == 0)
                        return hotplug_status;

                hotplug_status |= tmp;
                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
        }

        drm_WARN_ONCE(&dev_priv->drm, 1,
                      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
                      I915_READ(PORT_HOTPLUG_STAT));

        return hotplug_status;
}
1549
1550 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1551                                  u32 hotplug_status)
1552 {
1553         u32 pin_mask = 0, long_mask = 0;
1554         u32 hotplug_trigger;
1555
1556         if (IS_G4X(dev_priv) ||
1557             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1558                 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1559         else
1560                 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1561
1562         if (hotplug_trigger) {
1563                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1564                                    hotplug_trigger, hotplug_trigger,
1565                                    dev_priv->hotplug.hpd,
1566                                    i9xx_port_hotplug_long_detect);
1567
1568                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1569         }
1570
1571         if ((IS_G4X(dev_priv) ||
1572              IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1573             hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1574                 dp_aux_irq_handler(dev_priv);
1575 }
1576
/*
 * Top-level interrupt handler for VLV: snapshot GT/PM/display IIR bits,
 * ack them with the master interrupt disabled, then dispatch to the
 * per-source handlers. Returns IRQ_HANDLED if any IIR bit was set.
 */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	/* Handler may race with uninstall; bail if irqs are torn down. */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		/* Nothing pending in any IIR -> not our interrupt. */
		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		/* Ack GT/PM sources first; they are double buffered. */
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/*
		 * Restore the enables saved above; any IIR bits still set
		 * will produce a fresh 0->1 edge and re-enter the handler.
		 */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		/* Dispatch only after everything is acked and re-enabled. */
		if (gt_iir)
			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1661
/*
 * Top-level interrupt handler for CHV: like the VLV handler, but the GT
 * side is gen8-style (GEN8_MASTER_IRQ cascade) while display interrupts
 * still flow through VLV_IIR.
 */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	/* Handler may race with uninstall; bail if irqs are torn down. */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		/* Mask off the master-enable bit; only source bits matter. */
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		I915_WRITE(GEN8_MASTER_IRQ, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		/* gen8 GT handler acks its own per-engine IIRs. */
		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		/*
		 * Restore the enables saved above; any IIR bits still set
		 * will produce a fresh 0->1 edge and re-enter the handler.
		 */
		I915_WRITE(VLV_IER, ier);
		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
1738
/*
 * Handle PCH (IBX/CPT) hotplug interrupts: ack PCH_PORT_HOTPLUG and
 * translate the triggered SDE bits into HPD pin events.
 */
static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/*
	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
	 * unless we touch the hotplug register, even if hotplug_trigger is
	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
	 * errors.
	 */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	if (!hotplug_trigger) {
		/*
		 * With no trigger, avoid writing back the per-port status
		 * bits — presumably write-1-to-clear, so keeping them would
		 * discard events we were never told about (TODO confirm).
		 */
		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
			PORTD_HOTPLUG_STATUS_MASK |
			PORTC_HOTPLUG_STATUS_MASK |
			PORTB_HOTPLUG_STATUS_MASK;
		dig_hotplug_reg &= ~mask;
	}

	/* The write itself is the ack, performed even for a zero trigger. */
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
	if (!hotplug_trigger)
		return;

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.pch_hpd,
			   pch_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
1770
1771 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1772 {
1773         enum pipe pipe;
1774         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1775
1776         ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1777
1778         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1779                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1780                                SDE_AUDIO_POWER_SHIFT);
1781                 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
1782                         port_name(port));
1783         }
1784
1785         if (pch_iir & SDE_AUX_MASK)
1786                 dp_aux_irq_handler(dev_priv);
1787
1788         if (pch_iir & SDE_GMBUS)
1789                 gmbus_irq_handler(dev_priv);
1790
1791         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1792                 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1793
1794         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1795                 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1796
1797         if (pch_iir & SDE_POISON)
1798                 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1799
1800         if (pch_iir & SDE_FDI_MASK) {
1801                 for_each_pipe(dev_priv, pipe)
1802                         drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1803                                 pipe_name(pipe),
1804                                 I915_READ(FDI_RX_IIR(pipe)));
1805         }
1806
1807         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1808                 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1809
1810         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1811                 drm_dbg(&dev_priv->drm,
1812                         "PCH transcoder CRC error interrupt\n");
1813
1814         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1815                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1816
1817         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1818                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1819 }
1820
/*
 * Handle IVB/HSW display error interrupts (GEN7_ERR_INT): poison, per-pipe
 * FIFO underruns, and pipe CRC completion. Acks the register at the end.
 */
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			/* CRC result registers differ between IVB and HSW+. */
			if (IS_IVYBRIDGE(dev_priv))
				ivb_pipe_crc_irq_handler(dev_priv, pipe);
			else
				hsw_pipe_crc_irq_handler(dev_priv, pipe);
		}
	}

	/* Ack all bits we saw; write-back clears them. */
	I915_WRITE(GEN7_ERR_INT, err_int);
}
1843
/*
 * Handle CPT south error interrupts (SERR_INT): PCH poison and per-pipe
 * transcoder FIFO underruns. Acks the register at the end.
 */
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
	u32 serr_int = I915_READ(SERR_INT);
	enum pipe pipe;

	if (serr_int & SERR_INT_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	for_each_pipe(dev_priv, pipe)
		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);

	/* Ack all bits we saw; write-back clears them. */
	I915_WRITE(SERR_INT, serr_int);
}
1858
/*
 * Dispatch south display engine (CPT/LPT PCH) interrupts: hotplug, AUX,
 * GMBUS, audio content-protection events, FDI debug, and south errors.
 */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	/* Always called: acking PCH_PORT_HOTPLUG is required even w/o trigger. */
	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				I915_READ(FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
1895
/*
 * Dispatch ICP-family PCH interrupts (ICP/TGP/JSP/MCC/DG1): split the SDE
 * IIR bits into combo-DDI and type-C hotplug triggers per PCH variant,
 * ack the corresponding SHOTPLUG_CTL registers, and forward HPD/GMBUS.
 */
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
	u32 pin_mask = 0, long_mask = 0;

	/*
	 * PCH variants differ in which DDI/TC bits exist; check the more
	 * specific types before falling back to plain ICP.
	 */
	if (HAS_PCH_DG1(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_DG1;
		tc_hotplug_trigger = 0;
	} else if (HAS_PCH_TGP(dev_priv)) {
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
	} else if (HAS_PCH_JSP(dev_priv)) {
		/* JSP: TGP-style DDI bits, no type-C ports. */
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
		tc_hotplug_trigger = 0;
	} else if (HAS_PCH_MCC(dev_priv)) {
		/* MCC: ICP-style DDI bits, single type-C port (TC1). */
		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1);
	} else {
		drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
			 "Unrecognized PCH type 0x%x\n",
			 INTEL_PCH_TYPE(dev_priv));

		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
	}

	if (ddi_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-then-write-back acks the DDI hotplug status. */
		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   ddi_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_ddi_port_hotplug_long_detect);
	}

	if (tc_hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-then-write-back acks the type-C hotplug status. */
		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   tc_hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   icp_tc_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_ICP)
		gmbus_irq_handler(dev_priv);
}
1952
/*
 * Dispatch SPT/KBP PCH interrupts: port E hotplug lives in a second
 * register (PCH_PORT_HOTPLUG2), so the trigger is split accordingly.
 */
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	/* Ports A-D come from PCH_PORT_HOTPLUG; port E from HOTPLUG2. */
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
		~SDE_PORTE_HOTPLUG_SPT;
	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg;

		/* Read-then-write-back acks the hotplug status. */
		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug_long_detect);
	}

	if (hotplug2_trigger) {
		u32 dig_hotplug_reg;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   hotplug2_trigger, dig_hotplug_reg,
				   dev_priv->hotplug.pch_hpd,
				   spt_port_hotplug2_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev_priv);
}
1990
/*
 * Handle ILK/IVB/BDW CPU-side digital port hotplug (DP A): ack
 * DIGITAL_PORT_HOTPLUG_CNTRL and forward the pin events.
 */
static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Read-then-write-back acks the hotplug status. */
	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   ilk_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2006
/*
 * Dispatch ILK/SNB display engine interrupts (DEIIR bits): hotplug, AUX,
 * opregion, per-pipe vblank/underrun/CRC, cascaded PCH events, and the
 * gen5 PCU/RPS event.
 */
static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&dev_priv->gt.rps);
}
2052
/*
 * Dispatch IVB/HSW display engine interrupts (DEIIR bits, _IVB layout):
 * hotplug, error ints, PSR, AUX, opregion, vblanks, and cascaded PCH
 * events (skipped on PCH_NOP systems).
 */
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
				    u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		u32 psr_iir = I915_READ(EDP_PSR_IIR);

		/* Handle first, then ack EDP_PSR_IIR with the bits we read. */
		intel_psr_irq_handler(dev_priv, psr_iir);
		I915_WRITE(EDP_PSR_IIR, psr_iir);
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
			intel_handle_vblank(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2093
2094 /*
2095  * To handle irqs with the minimum potential races with fresh interrupts, we:
2096  * 1 - Disable Master Interrupt Control.
2097  * 2 - Find the source(s) of the interrupt.
2098  * 3 - Clear the Interrupt Identity bits (IIR).
2099  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2100  * 5 - Re-enable Master Interrupt Control.
2101  */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = i915->uncore.regs;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* Handler may race with uninstall; bail if irqs are torn down. */
	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		/* SNB+ uses the gen6 GT handler, ILK the gen5 one. */
		if (INTEL_GEN(i915) >= 6)
			gen6_gt_irq_handler(&i915->gt, gt_iir);
		else
			gen5_gt_irq_handler(&i915->gt, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		/* IVB+ uses a different DEIIR bit layout than ILK/SNB. */
		if (INTEL_GEN(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	/* PM interrupts (GEN6_PMIIR) only exist on SNB+. */
	if (INTEL_GEN(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* Restore master and south enables saved above. */
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}
2169
/*
 * Handle BXT/GLK hotplug interrupts: ack PCH_PORT_HOTPLUG and forward
 * the triggered pins (CPU-side hpd table, BXT long-pulse detection).
 */
static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
				u32 hotplug_trigger)
{
	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;

	/* Read-then-write-back acks the hotplug status. */
	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   hotplug_trigger, dig_hotplug_reg,
			   dev_priv->hotplug.hpd,
			   bxt_port_hotplug_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
2185
/*
 * Handle gen11+ DE HPD interrupts: type-C and Thunderbolt triggers have
 * separate HOTPLUG_CTL registers; ack each and collect the pin events.
 */
static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	u32 pin_mask = 0, long_mask = 0;
	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;

	if (trigger_tc) {
		u32 dig_hotplug_reg;

		/* Read-then-write-back acks the type-C hotplug status. */
		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tc, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (trigger_tbt) {
		u32 dig_hotplug_reg;

		/* Read-then-write-back acks the Thunderbolt hotplug status. */
		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);

		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
				   trigger_tbt, dig_hotplug_reg,
				   dev_priv->hotplug.hpd,
				   gen11_port_hotplug_long_detect);
	}

	if (pin_mask)
		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
	else
		drm_err(&dev_priv->drm,
			"Unexpected DE HPD interrupt 0x%08x\n", iir);
}
2222
2223 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2224 {
2225         u32 mask;
2226
2227         if (INTEL_GEN(dev_priv) >= 12)
2228                 return TGL_DE_PORT_AUX_DDIA |
2229                         TGL_DE_PORT_AUX_DDIB |
2230                         TGL_DE_PORT_AUX_DDIC |
2231                         TGL_DE_PORT_AUX_USBC1 |
2232                         TGL_DE_PORT_AUX_USBC2 |
2233                         TGL_DE_PORT_AUX_USBC3 |
2234                         TGL_DE_PORT_AUX_USBC4 |
2235                         TGL_DE_PORT_AUX_USBC5 |
2236                         TGL_DE_PORT_AUX_USBC6;
2237
2238
2239         mask = GEN8_AUX_CHANNEL_A;
2240         if (INTEL_GEN(dev_priv) >= 9)
2241                 mask |= GEN9_AUX_CHANNEL_B |
2242                         GEN9_AUX_CHANNEL_C |
2243                         GEN9_AUX_CHANNEL_D;
2244
2245         if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2246                 mask |= CNL_AUX_CHANNEL_F;
2247
2248         if (IS_GEN(dev_priv, 11))
2249                 mask |= ICL_AUX_CHANNEL_E;
2250
2251         return mask;
2252 }
2253
2254 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2255 {
2256         if (IS_ROCKETLAKE(dev_priv))
2257                 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2258         else if (INTEL_GEN(dev_priv) >= 11)
2259                 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2260         else if (INTEL_GEN(dev_priv) >= 9)
2261                 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2262         else
2263                 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2264 }
2265
/*
 * Dispatch gen8+ DE MISC interrupts: opregion ASLE and PSR. Complains if
 * the IIR carried no bit we recognize.
 */
static void
gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
{
	bool found = false;

	if (iir & GEN8_DE_MISC_GSE) {
		intel_opregion_asle_intr(dev_priv);
		found = true;
	}

	if (iir & GEN8_DE_EDP_PSR) {
		u32 psr_iir;
		i915_reg_t iir_reg;

		/* Gen12+ moved PSR interrupts to per-transcoder registers. */
		if (INTEL_GEN(dev_priv) >= 12)
			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
		else
			iir_reg = EDP_PSR_IIR;

		/* Read-then-write-back acks the PSR IIR. */
		psr_iir = I915_READ(iir_reg);
		I915_WRITE(iir_reg, psr_iir);

		if (psr_iir)
			found = true;

		intel_psr_irq_handler(dev_priv, psr_iir);
	}

	if (!found)
		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}
2297
2298 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2299                                            u32 te_trigger)
2300 {
2301         enum pipe pipe = INVALID_PIPE;
2302         enum transcoder dsi_trans;
2303         enum port port;
2304         u32 val, tmp;
2305
2306         /*
2307          * Incase of dual link, TE comes from DSI_1
2308          * this is to check if dual link is enabled
2309          */
2310         val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2311         val &= PORT_SYNC_MODE_ENABLE;
2312
2313         /*
2314          * if dual link is enabled, then read DSI_0
2315          * transcoder registers
2316          */
2317         port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2318                                                   PORT_A : PORT_B;
2319         dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2320
2321         /* Check if DSI configured in command mode */
2322         val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
2323         val = val & OP_MODE_MASK;
2324
2325         if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2326                 drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
2327                 return;
2328         }
2329
2330         /* Get PIPE for handling VBLANK event */
2331         val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
2332         switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2333         case TRANS_DDI_EDP_INPUT_A_ON:
2334                 pipe = PIPE_A;
2335                 break;
2336         case TRANS_DDI_EDP_INPUT_B_ONOFF:
2337                 pipe = PIPE_B;
2338                 break;
2339         case TRANS_DDI_EDP_INPUT_C_ONOFF:
2340                 pipe = PIPE_C;
2341                 break;
2342         default:
2343                 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2344                 return;
2345         }
2346
2347         intel_handle_vblank(dev_priv, pipe);
2348
2349         /* clear TE in dsi IIR */
2350         port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2351         tmp = I915_READ(DSI_INTR_IDENT_REG(port));
2352         I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
2353 }
2354
/*
 * Handle all display engine (DE) interrupt sources flagged in @master_ctl:
 * DE MISC, gen11+ HPD, DE port (AUX, hotplug, GMBUS, DSI TE), per-pipe
 * events and, on PCH-split platforms, south display (SDE) interrupts.
 * Each IIR is acked by writing its value back before the individual bits
 * are dispatched, so new events latched during processing are not lost.
 * Returns IRQ_HANDLED if any source was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir;
	enum pipe pipe;

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = I915_READ(GEN8_DE_MISC_IIR);
		if (iir) {
			/* Ack first, then handle the latched bits. */
			I915_WRITE(GEN8_DE_MISC_IIR, iir);
			ret = IRQ_HANDLED;
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE MISC)!\n");
		}
	}

	/* The dedicated DE HPD interrupt register only exists on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = I915_READ(GEN11_DE_HPD_IIR);
		if (iir) {
			I915_WRITE(GEN11_DE_HPD_IIR, iir);
			ret = IRQ_HANDLED;
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = I915_READ(GEN8_DE_PORT_IIR);
		if (iir) {
			u32 tmp_mask;
			/* Tracks whether any known port source claimed the IIR. */
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, iir);
			ret = IRQ_HANDLED;

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			/* Pre-gen11 platforms report hotplug via the port IIR. */
			if (IS_GEN9_LP(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & BDW_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv, tmp_mask);
					found = true;
				}
			}

			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			/* DSI tearing-effect events (command mode) on gen11+. */
			if (INTEL_GEN(dev_priv) >= 11) {
				tmp_mask = iir & (DSI0_TE | DSI1_TE);
				if (tmp_mask) {
					gen11_dsi_te_interrupt_handler(dev_priv, tmp_mask);
					found = true;
				}
			}

			if (!found)
				drm_err(&dev_priv->drm,
					"Unexpected DE Port interrupt\n");
		}
		else
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err(&dev_priv->drm,
				"The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & GEN9_PIPE_PLANE1_FLIP_DONE)
			flip_done_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		/* Fault bits are only logged, not otherwise handled. */
		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err(&dev_priv->drm,
				"Fault errors on pipe %c: 0x%08x\n",
				pipe_name(pipe),
				fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			/* Dispatch by PCH generation, newest first. */
			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}

	return ret;
}
2502
/*
 * Disable the gen8 top-level master interrupt and return a snapshot of
 * the per-source level indications that are currently asserted.
 */
static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}
2515
/* Re-enable the gen8 top-level master interrupt. */
static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}
2520
/*
 * Top-level hard-irq handler for gen8/gen9: mask the master interrupt,
 * dispatch GT and (if flagged) display sources, then unmask again.
 */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = dev_priv->uncore.regs;
	u32 master_ctl;

	/* Bail early if driver irq handling is (being) torn down. */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		/* Nothing asserted for us; restore master and decline. */
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	return IRQ_HANDLED;
}
2550
/*
 * Ack any pending GU MISC interrupts and return the IIR bits that were
 * latched; returns 0 when the master control did not flag GU MISC.
 * The returned bits are handled later by gen11_gu_misc_irq_handler().
 */
static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}
2566
2567 static void
2568 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2569 {
2570         if (iir & GEN11_GU_MISC_GSE)
2571                 intel_opregion_asle_intr(gt->i915);
2572 }
2573
/*
 * Disable the gen11 top-level master interrupt and return a snapshot of
 * the per-source level indications that are currently asserted.
 */
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}
2586
/* Re-enable the gen11 top-level master interrupt. */
static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
2591
/*
 * Service the display half of a gen11 interrupt: disable the display
 * interrupt control, run the gen8 DE handler on the sampled bits, then
 * re-enable.  Takes an RPM wakeref-assert exemption around the MMIO.
 */
static void
gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = i915->uncore.regs;
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
2610
/*
 * Common gen11+ hard-irq body, parameterized on the master-interrupt
 * disable/enable callbacks so gen11 and DG1 can share the sequence.
 * Always inlined so the indirect calls collapse into direct ones.
 */
static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
		    u32 (*intr_disable)(void __iomem * const regs),
		    void (*intr_enable)(void __iomem * const regs))
{
	void __iomem * const regs = i915->uncore.regs;
	struct intel_gt *gt = &i915->gt;
	u32 master_ctl;
	u32 gu_misc_iir;

	/* Bail early if driver irq handling is (being) torn down. */
	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = intr_disable(regs);
	if (!master_ctl) {
		intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	/* Ack GU MISC while master is still disabled... */
	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);

	intr_enable(regs);

	/* ...but handle it only after master has been re-enabled. */
	gen11_gu_misc_irq_handler(gt, gu_misc_iir);

	return IRQ_HANDLED;
}
2645
/* Interrupt entry point for gen11/gen12, using the standard master controls. */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   gen11_master_intr_disable,
				   gen11_master_intr_enable);
}
2652
/*
 * DG1 variant of the master disable: quiesce the master unit register,
 * ack it, then sample and ack the gfx master IRQ underneath it.
 * Returns the sampled gfx indications (0 if nothing was pending).
 */
static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
	 * out as this bit doesn't exist anymore for DG1
	 */
	val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);

	return val;
}
2680
/* Re-enable the DG1 master unit interrupt. */
static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
}
2685
/* Interrupt entry point for DG1, using its master-unit disable/ack controls. */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	return __gen11_irq_handler(arg,
				   dg1_master_intr_disable_and_ack,
				   dg1_master_intr_enable);
}
2692
2693 /* Called from drm generic code, passed 'crtc' which
2694  * we use as a pipe index
2695  */
2696 int i8xx_enable_vblank(struct drm_crtc *crtc)
2697 {
2698         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2699         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2700         unsigned long irqflags;
2701
2702         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2703         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2704         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2705
2706         return 0;
2707 }
2708
2709 int i915gm_enable_vblank(struct drm_crtc *crtc)
2710 {
2711         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2712
2713         /*
2714          * Vblank interrupts fail to wake the device up from C2+.
2715          * Disabling render clock gating during C-states avoids
2716          * the problem. There is a small power cost so we do this
2717          * only when vblank interrupts are actually enabled.
2718          */
2719         if (dev_priv->vblank_enabled++ == 0)
2720                 I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2721
2722         return i8xx_enable_vblank(crtc);
2723 }
2724
2725 int i965_enable_vblank(struct drm_crtc *crtc)
2726 {
2727         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2728         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2729         unsigned long irqflags;
2730
2731         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2732         i915_enable_pipestat(dev_priv, pipe,
2733                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2734         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2735
2736         return 0;
2737 }
2738
2739 int ilk_enable_vblank(struct drm_crtc *crtc)
2740 {
2741         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2742         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2743         unsigned long irqflags;
2744         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2745                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2746
2747         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2748         ilk_enable_display_irq(dev_priv, bit);
2749         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2750
2751         /* Even though there is no DMC, frame counter can get stuck when
2752          * PSR is active as no frames are generated.
2753          */
2754         if (HAS_PSR(dev_priv))
2755                 drm_crtc_vblank_restore(crtc);
2756
2757         return 0;
2758 }
2759
2760 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2761                                    bool enable)
2762 {
2763         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2764         enum port port;
2765         u32 tmp;
2766
2767         if (!(intel_crtc->mode_flags &
2768             (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2769                 return false;
2770
2771         /* for dual link cases we consider TE from slave */
2772         if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2773                 port = PORT_B;
2774         else
2775                 port = PORT_A;
2776
2777         tmp =  I915_READ(DSI_INTR_MASK_REG(port));
2778         if (enable)
2779                 tmp &= ~DSI_TE_EVENT;
2780         else
2781                 tmp |= DSI_TE_EVENT;
2782
2783         I915_WRITE(DSI_INTR_MASK_REG(port), tmp);
2784
2785         tmp = I915_READ(DSI_INTR_IDENT_REG(port));
2786         I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
2787
2788         return true;
2789 }
2790
2791 int bdw_enable_vblank(struct drm_crtc *crtc)
2792 {
2793         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2794         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2795         enum pipe pipe = intel_crtc->pipe;
2796         unsigned long irqflags;
2797
2798         if (gen11_dsi_configure_te(intel_crtc, true))
2799                 return 0;
2800
2801         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2802         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2803         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2804
2805         /* Even if there is no DMC, frame counter can get stuck when
2806          * PSR is active as no frames are generated, so check only for PSR.
2807          */
2808         if (HAS_PSR(dev_priv))
2809                 drm_crtc_vblank_restore(crtc);
2810
2811         return 0;
2812 }
2813
2814 void skl_enable_flip_done(struct intel_crtc *crtc)
2815 {
2816         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2817         enum pipe pipe = crtc->pipe;
2818         unsigned long irqflags;
2819
2820         spin_lock_irqsave(&i915->irq_lock, irqflags);
2821
2822         bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
2823
2824         spin_unlock_irqrestore(&i915->irq_lock, irqflags);
2825 }
2826
2827 /* Called from drm generic code, passed 'crtc' which
2828  * we use as a pipe index
2829  */
2830 void i8xx_disable_vblank(struct drm_crtc *crtc)
2831 {
2832         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2833         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2834         unsigned long irqflags;
2835
2836         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2837         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2838         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2839 }
2840
2841 void i915gm_disable_vblank(struct drm_crtc *crtc)
2842 {
2843         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2844
2845         i8xx_disable_vblank(crtc);
2846
2847         if (--dev_priv->vblank_enabled == 0)
2848                 I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2849 }
2850
2851 void i965_disable_vblank(struct drm_crtc *crtc)
2852 {
2853         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2854         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2855         unsigned long irqflags;
2856
2857         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2858         i915_disable_pipestat(dev_priv, pipe,
2859                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2860         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2861 }
2862
2863 void ilk_disable_vblank(struct drm_crtc *crtc)
2864 {
2865         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2866         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2867         unsigned long irqflags;
2868         u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2869                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2870
2871         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2872         ilk_disable_display_irq(dev_priv, bit);
2873         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2874 }
2875
2876 void bdw_disable_vblank(struct drm_crtc *crtc)
2877 {
2878         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2879         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2880         enum pipe pipe = intel_crtc->pipe;
2881         unsigned long irqflags;
2882
2883         if (gen11_dsi_configure_te(intel_crtc, false))
2884                 return;
2885
2886         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2887         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2888         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2889 }
2890
2891 void skl_disable_flip_done(struct intel_crtc *crtc)
2892 {
2893         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
2894         enum pipe pipe = crtc->pipe;
2895         unsigned long irqflags;
2896
2897         spin_lock_irqsave(&i915->irq_lock, irqflags);
2898
2899         bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
2900
2901         spin_unlock_irqrestore(&i915->irq_lock, irqflags);
2902 }
2903
2904 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2905 {
2906         struct intel_uncore *uncore = &dev_priv->uncore;
2907
2908         if (HAS_PCH_NOP(dev_priv))
2909                 return;
2910
2911         GEN3_IRQ_RESET(uncore, SDE);
2912
2913         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2914                 I915_WRITE(SERR_INT, 0xffffffff);
2915 }
2916
2917 /*
2918  * SDEIER is also touched by the interrupt handler to work around missed PCH
2919  * interrupts. Hence we can't update it after the interrupt handler is enabled -
2920  * instead we unconditionally enable all PCH interrupt sources here, but then
2921  * only unmask them as needed with SDEIMR.
2922  *
2923  * This function needs to be called before interrupts are enabled.
2924  */
static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	/* Reset should have left SDEIER cleared; see the comment above. */
	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	/* Posting read to make sure the write landed before irqs fire. */
	POSTING_READ(SDEIER);
}
2934
/*
 * Quiesce all VLV/CHV display interrupt sources: ack DPINVGTT status,
 * disable hotplug detection, clear pipestats, and reset the VLV
 * IMR/IER/IIR registers.  Caller holds the irq lock.
 */
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);

	/* Disable hotplug detection and ack any latched hotplug status. */
	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	/* ~0u == everything masked; the postinstall path asserts this. */
	dev_priv->irq_mask = ~0u;
}
2952
/*
 * Enable the VLV/CHV display interrupts: pipestat sources (CRC done on
 * every pipe, GMBUS on pipe A) and the first-level display/LPE events.
 * Expects vlv_display_irq_reset() to have run first (irq_mask == ~0u).
 */
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	/* CHV has a third pipe. */
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* Catch a postinstall without a preceding reset. */
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
2983
/* drm_dma.h hooks */
/* Reset display, GT and PCH interrupts for ILK-class (gen5-7) hardware. */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	/* ~0u == everything masked until postinstall re-enables sources. */
	dev_priv->irq_mask = ~0u;

	/* Gen7 latches display errors in GEN7_ERR_INT; ack them all. */
	if (IS_GEN(dev_priv, 7))
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	/* HSW has dedicated PSR interrupt registers to quiesce. */
	if (IS_HASWELL(dev_priv)) {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(&dev_priv->gt);

	ibx_irq_reset(dev_priv);
}
3005
/* Full interrupt reset for VLV: master, GT, then display (if enabled). */
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	/* Disable the master interrupt first; posting read flushes it. */
	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(&dev_priv->gt);

	/* Display irqs are enabled/disabled independently under the lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3018
/* Full interrupt reset for gen8/gen9: master, GT, PSR, pipes, port, PCH. */
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	gen8_master_intr_disable(dev_priv->uncore.regs);

	gen8_gt_irq_reset(&dev_priv->gt);

	/* Mask and ack all PSR interrupts. */
	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	/* Only touch pipe registers whose power domain is currently on. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}
3043
/*
 * Reset all gen11+ display interrupt registers: display int control,
 * per-transcoder PSR, per-pipe, DE port/misc, HPD and (ICP+) the PCH.
 * Power-gated transcoders/pipes are skipped since their registers are
 * inaccessible while the corresponding power well is off.
 */
static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (INTEL_GEN(dev_priv) >= 12) {
		/* Gen12 moved PSR interrupts to per-transcoder registers. */
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);

	/* Wa_14010685332:icl,jsl,ehl,tgl,rkl */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
				 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
		intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
				 SBCLK_RUN_REFCLK_DIS, 0);
	}
}
3091
/* Full interrupt reset for gen11+: master, GT, display, GU MISC, PCU. */
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* DG1 routes the master through its master unit register. */
	if (HAS_MASTER_UNIT_IRQ(dev_priv))
		dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
	else
		gen11_master_intr_disable(dev_priv->uncore.regs);

	gen11_gt_irq_reset(&dev_priv->gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}
3107
/*
 * Re-program the per-pipe DE interrupt mask/enable registers for the
 * pipes in @pipe_mask after their display power well has been enabled.
 * Vblank and FIFO-underrun (and, on gen9+, flip-done) are enabled on
 * top of the saved de_irq_mask.  No-op if irqs are globally disabled.
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 9)
		extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}
3133
/*
 * Reset the per-pipe DE interrupt registers for the pipes in @pipe_mask
 * before their display power well is turned off, then synchronize so no
 * handler is still touching them.  No-op if irqs are globally disabled.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}
3155
/* Full interrupt reset for CHV: master, GT, PCU, then display (if enabled). */
static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Disable the master interrupt first; posting read flushes it. */
	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(&dev_priv->gt);

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	/* Display irqs are enabled/disabled independently under the lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3172
3173 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3174 {
3175         u32 hotplug;
3176
3177         /*
3178          * Enable digital hotplug on the PCH, and configure the DP short pulse
3179          * duration to 2ms (which is the minimum in the Display Port spec).
3180          * The pulse duration bits are reserved on LPT+.
3181          */
3182         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3183         hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3184                      PORTC_PULSE_DURATION_MASK |
3185                      PORTD_PULSE_DURATION_MASK);
3186         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3187         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3188         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3189         /*
3190          * When CPU and PCH are on the same package, port A
3191          * HPD must be enabled in both north and south.
3192          */
3193         if (HAS_PCH_LPT_LP(dev_priv))
3194                 hotplug |= PORTA_HOTPLUG_ENABLE;
3195         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3196 }
3197
3198 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3199 {
3200         u32 hotplug_irqs, enabled_irqs;
3201
3202         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3203         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
3204
3205         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3206
3207         ibx_hpd_detection_setup(dev_priv);
3208 }
3209
3210 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv,
3211                                         u32 enable_mask)
3212 {
3213         u32 hotplug;
3214
3215         hotplug = I915_READ(SHOTPLUG_CTL_DDI);
3216         hotplug |= enable_mask;
3217         I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
3218 }
3219
3220 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv,
3221                                        u32 enable_mask)
3222 {
3223         u32 hotplug;
3224
3225         hotplug = I915_READ(SHOTPLUG_CTL_TC);
3226         hotplug |= enable_mask;
3227         I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
3228 }
3229
/*
 * Program south (ICP+ PCH) hotplug interrupts: unmask the relevant SDE
 * bits and enable the DDI/TC detection logic per the supplied masks.
 */
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
			      u32 ddi_enable_mask, u32 tc_enable_mask)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	/* Hotplug detection filter count; only programmed up to TGP. */
	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_ddi_hpd_detection_setup(dev_priv, ddi_enable_mask);
	if (tc_enable_mask)
		icp_tc_hpd_detection_setup(dev_priv, tc_enable_mask);
}
3247
3248 /*
3249  * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
3250  * equivalent of SDE.
3251  */
3252 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3253 {
3254         icp_hpd_irq_setup(dev_priv,
3255                           ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(HPD_PORT_TC1));
3256 }
3257
3258 /*
3259  * JSP behaves exactly the same as MCC above except that port C is mapped to
3260  * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
3261  * masks & tables rather than ICP's masks & tables.
3262  */
3263 static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3264 {
3265         icp_hpd_irq_setup(dev_priv,
3266                           TGP_DDI_HPD_ENABLE_MASK, 0);
3267 }
3268
/* DG1 south hotplug setup: invert all DDI HPD pins, then enable detection. */
static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * Set the HPD invert bits for DDI A-D before enabling detection.
	 * NOTE(review): presumably required by the DG1 board design —
	 * confirm against bspec.
	 */
	val = I915_READ(SOUTH_CHICKEN1);
	val |= (INVERT_DDIA_HPD |
		INVERT_DDIB_HPD |
		INVERT_DDIC_HPD |
		INVERT_DDID_HPD);
	I915_WRITE(SOUTH_CHICKEN1, val);

	/* DDI pins only; no type-C ports. */
	icp_hpd_irq_setup(dev_priv,
			  DG1_DDI_HPD_ENABLE_MASK, 0);
}
3283
3284 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3285 {
3286         u32 hotplug;
3287
3288         hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3289         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3290                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3291                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3292                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3293                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3294                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6);
3295         I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3296 }
3297
3298 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3299 {
3300         u32 hotplug;
3301
3302         hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3303         hotplug |= GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
3304                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
3305                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
3306                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
3307                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
3308                    GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6);
3309         I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3310 }
3311
/*
 * Program gen11+ north hotplug interrupts (TC + TBT), then chain into
 * the south (PCH) setup for ICP/TGP class PCHs.
 */
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	/* Unmask the enabled HPD bits, keep the remaining HPD bits masked. */
	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	val |= ~enabled_irqs & hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_tc_hpd_detection_setup(dev_priv);
	gen11_tbt_hpd_detection_setup(dev_priv);

	/* South side: pick the mask set matching the PCH generation. */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
		icp_hpd_irq_setup(dev_priv,
				  TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv,
				  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
}
3336
/* Enable SPT+ PCH hotplug detection on ports A-E. */
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* Port E lives in a second hotplug control register. */
	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
3361
/* Unmask SPT+ south hotplug interrupts and enable the detection logic. */
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	/* Hotplug detection filter count; programmed on CNP+ only. */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}
3376
3377 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3378 {
3379         u32 hotplug;
3380
3381         /*
3382          * Enable digital hotplug on the CPU, and configure the DP short pulse
3383          * duration to 2ms (which is the minimum in the Display Port spec)
3384          * The pulse duration bits are reserved on HSW+.
3385          */
3386         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3387         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3388         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3389                    DIGITAL_PORTA_PULSE_DURATION_2ms;
3390         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3391 }
3392
/*
 * Program north (CPU) hotplug interrupts for ILK+ and then chain into
 * the south (PCH) setup.
 */
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);

	/* BDW+ uses a different display IRQ update helper than ILK..HSW. */
	if (INTEL_GEN(dev_priv) >= 8)
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	else
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}
3409
/*
 * Enable BXT hotplug detection on ports A-C and program the per-port
 * HPD invert bits according to the VBT.
 */
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				    u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	drm_dbg_kms(&dev_priv->drm,
		    "Invert bit setting: hp_ctl:%x hp_port:%x\n",
		    hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT invert bit has to be set based on AOB design
	 * for HPD detection logic, update it based on VBT fields.
	 */
	if ((enabled_irqs & GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & GEN8_DE_PORT_HOTPLUG(HPD_PORT_B)) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
3441
3442 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3443 {
3444         u32 hotplug_irqs, enabled_irqs;
3445
3446         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
3447         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
3448
3449         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3450
3451         bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3452 }
3453
/*
 * Unmask the always-wanted south display (SDE) interrupts. The mask
 * depends on the PCH generation; PCH_NOP systems have no south display
 * engine and are skipped entirely.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	/* Sanity check: IIR should be clear before we unmask anything. */
	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}
3471
/*
 * Enable display interrupts for ILK..HSW class hardware. Bits in
 * display_mask are unmasked in IMR; extra_mask bits are only enabled in
 * IER and stay masked until something unmasks them later.
 */
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		/* IVB/HSW use a different DE interrupt bit layout. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		/* PSR interrupt exists only on HSW in this class. */
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	/* NOTE(review): PCH prep runs before DE init — keep this ordering. */
	ibx_irq_pre_postinstall(dev_priv);

	gen5_gt_irq_postinstall(&dev_priv->gt);

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);

	ibx_irq_postinstall(dev_priv);
}
3511
3512 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3513 {
3514         lockdep_assert_held(&dev_priv->irq_lock);
3515
3516         if (dev_priv->display_irqs_enabled)
3517                 return;
3518
3519         dev_priv->display_irqs_enabled = true;
3520
3521         if (intel_irqs_enabled(dev_priv)) {
3522                 vlv_display_irq_reset(dev_priv);
3523                 vlv_display_irq_postinstall(dev_priv);
3524         }
3525 }
3526
3527 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3528 {
3529         lockdep_assert_held(&dev_priv->irq_lock);
3530
3531         if (!dev_priv->display_irqs_enabled)
3532                 return;
3533
3534         dev_priv->display_irqs_enabled = false;
3535
3536         if (intel_irqs_enabled(dev_priv))
3537                 vlv_display_irq_reset(dev_priv);
3538 }
3539
3540
/*
 * Enable VLV interrupts: GT first, display (only if currently in use),
 * then the master enable last so everything is programmed beforehand.
 */
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);
}
3553
/*
 * Program the gen8+ display engine (DE) interrupt registers: per-pipe,
 * port/AUX, misc, and (gen11+) HPD blocks. Pipe registers are only
 * touched for pipes whose power wells are currently enabled.
 */
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	/* GSE lives in DE_MISC only up to gen10. */
	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEN9_LP(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (INTEL_GEN(dev_priv) >= 11) {
		enum port port;

		/* DSI TE interrupts are only wanted when a DSI panel exists. */
		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	/* Vblank/underrun are enabled in IER but stay masked in IMR. */
	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	if (INTEL_GEN(dev_priv) >= 9)
		de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (INTEL_GEN(dev_priv) >= 12) {
		enum transcoder trans;

		/* Gen12+: per-transcoder PSR IIR, skip powered-down ones. */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		/* Only touch the pipe IRQ registers when the pipe is powered. */
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		/* HPD sources: enabled in IER, all masked until hpd_irq_setup. */
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}
3631
/* Enable gen8 interrupts: PCH prep, GT, DE, PCH, master enable last. */
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* South display engine exists only on PCH split systems. */
	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev_priv);

	gen8_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	/* Master enable last, after every unit has been programmed. */
	gen8_master_intr_enable(dev_priv->uncore.regs);
}
3645
/* Unmask the always-wanted SDE interrupts on ICP+ PCHs (GMBUS only). */
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 mask = SDE_GMBUS_ICP;

	/* Enable all SDE sources in IER; delivery is gated via IMR below. */
	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}
3657
/* Enable gen11+ interrupts: PCH, GT, DE, GU misc, then master enable. */
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(&dev_priv->gt);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	/* DG1-style hardware has a separate master unit interrupt register. */
	if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
		dg1_master_intr_enable(uncore->regs);
		POSTING_READ(DG1_MSTR_UNIT_INTR);
	} else {
		gen11_master_intr_enable(uncore->regs);
		POSTING_READ(GEN11_GFX_MSTR_IRQ);
	}
}
3681
/*
 * Enable CHV interrupts: GT first, display (only if currently in use),
 * then the master enable last so everything is programmed beforehand.
 */
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(&dev_priv->gt);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);
}
3694
3695 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3696 {
3697         struct intel_uncore *uncore = &dev_priv->uncore;
3698
3699         i9xx_pipestat_irq_reset(dev_priv);
3700
3701         GEN2_IRQ_RESET(uncore);
3702         dev_priv->irq_mask = ~0u;
3703 }
3704
/* Enable gen2 interrupts: error mask, always-on IMR/IER bits, CRC pipestats. */
static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	/* Report only page table and memory refresh errors; mask the rest. */
	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}
3736
/*
 * Ack the gen2 master error interrupt: read and clear EIR, returning
 * the raw value in *eir and any bits that survived the clear ("stuck")
 * in *eir_stuck so the caller can report and mask them.
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	/* Write back the set bits to clear them (where clearable). */
	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	/* Whatever is still set after the write-to-clear is stuck. */
	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
3766
3767 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3768                                    u16 eir, u16 eir_stuck)
3769 {
3770         DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3771
3772         if (eir_stuck)
3773                 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3774                         eir_stuck);
3775 }
3776
/*
 * Ack the gen3+ master error interrupt: read and clear EIR, returning
 * the raw value in *eir and any bits that survived the clear ("stuck")
 * in *eir_stuck so the caller can report and mask them.
 */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	/* Write back the set bits to clear them (where clearable). */
	I915_WRITE(EIR, *eir);

	/* Whatever is still set after the write-to-clear is stuck. */
	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}
3804
3805 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3806                                    u32 eir, u32 eir_stuck)
3807 {
3808         DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3809
3810         if (eir_stuck)
3811                 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
3812                         eir_stuck);
3813 }
3814
/*
 * Top-level interrupt handler for gen2 devices (16-bit IIR, no master
 * interrupt control): ack all pending sources, then dispatch them.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Ack all pending sources before handling them. */
		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3859
/* Disable and reset all gen3 interrupt sources. */
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Disable hotplug interrupt generation and ack pending status. */
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Everything masked until postinstall unmasks the wanted bits. */
	dev_priv->irq_mask = ~0u;
}
3874
/*
 * Enable gen3 interrupts: error mask, always-on IMR/IER bits (plus
 * hotplug where supported), CRC and ASLE pipestats.
 */
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	/* Report only page table and memory refresh errors; mask the rest. */
	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
3915
/*
 * Top-level interrupt handler for gen3 devices: ack all pending sources
 * (including hotplug where supported), then dispatch them.
 */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Ack all pending sources before handling them. */
		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
3968
/* Disable and reset all gen4 interrupt sources. */
static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Disable hotplug interrupt generation and ack pending status. */
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Everything masked until postinstall unmasks the wanted bits. */
	dev_priv->irq_mask = ~0u;
}
3981
/*
 * Enable gen4 interrupts: error detection mask, always-on IMR/IER bits
 * (display port, pipe events, master error), GMBUS/CRC/ASLE pipestats.
 */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* G4X has a second (BSD) ring that can raise user interrupts. */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}
4034
/*
 * Enable hotplug detection for the currently enabled HPD pins on
 * gen3/gen4 GMCH platforms. Installed as dev_priv->display.hpd_irq_setup
 * by intel_irq_init(); must be called with irq_lock held.
 */
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later.  So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}
4059
/*
 * Top-level interrupt handler for gen4 (i965/g4x).
 *
 * The loop body acks all secondary status registers (hotplug, pipestat,
 * error) first, then clears IIR, and only afterwards processes the
 * latched values. Note the loop currently runs exactly once per
 * invocation (while (0)); "break" exits it early when IIR is empty.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	/* The line is shared (IRQF_SHARED); bail if our irqs are off. */
	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Clear IIR only after the secondary status acks above. */
		I915_WRITE(GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_signal_breadcrumbs(dev_priv->gt.engine[VCS0]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
4114
4115 /**
4116  * intel_irq_init - initializes irq support
4117  * @dev_priv: i915 device instance
4118  *
4119  * This function initializes all the irq support including work items, timers
4120  * and all the vtables. It does not setup the interrupt itself though.
4121  */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	/* Remap info is allocated lazily; freed in intel_irq_fini(). */
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	/*
	 * Pick the platform hpd_irq_setup hook. The PCH checks below are
	 * ordered most-specific first (DG1/JSP/MCC before the generic
	 * gen11/SPT fallbacks), so don't reorder them.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else {
		if (HAS_PCH_DG1(dev_priv))
			dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
		else if (HAS_PCH_JSP(dev_priv))
			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
		else if (HAS_PCH_MCC(dev_priv))
			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
		else if (INTEL_GEN(dev_priv) >= 11)
			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
		else if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	}
}
4180
4181 /**
4182  * intel_irq_fini - deinitializes IRQ support
4183  * @i915: i915 device instance
4184  *
4185  * This function deinitializes all the IRQ support.
4186  */
4187 void intel_irq_fini(struct drm_i915_private *i915)
4188 {
4189         int i;
4190
4191         for (i = 0; i < MAX_L3_SLICES; ++i)
4192                 kfree(i915->l3_parity.remap_info[i]);
4193 }
4194
4195 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4196 {
4197         if (HAS_GMCH(dev_priv)) {
4198                 if (IS_CHERRYVIEW(dev_priv))
4199                         return cherryview_irq_handler;
4200                 else if (IS_VALLEYVIEW(dev_priv))
4201                         return valleyview_irq_handler;
4202                 else if (IS_GEN(dev_priv, 4))
4203                         return i965_irq_handler;
4204                 else if (IS_GEN(dev_priv, 3))
4205                         return i915_irq_handler;
4206                 else
4207                         return i8xx_irq_handler;
4208         } else {
4209                 if (HAS_MASTER_UNIT_IRQ(dev_priv))
4210                         return dg1_irq_handler;
4211                 if (INTEL_GEN(dev_priv) >= 11)
4212                         return gen11_irq_handler;
4213                 else if (INTEL_GEN(dev_priv) >= 8)
4214                         return gen8_irq_handler;
4215                 else
4216                         return ilk_irq_handler;
4217         }
4218 }
4219
/*
 * Dispatch to the platform-specific irq reset routine, disabling and
 * clearing all interrupt sources.
 */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_reset(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_reset(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_reset(dev_priv);
	else
		i8xx_irq_reset(dev_priv);
}
4242
/*
 * Dispatch to the platform-specific postinstall routine, programming
 * the interrupt registers and unmasking the always-on sources.
 */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (INTEL_GEN(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
		return;
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_irq_postinstall(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 4))
		i965_irq_postinstall(dev_priv);
	else if (IS_GEN(dev_priv, 3))
		i915_irq_postinstall(dev_priv);
	else
		i8xx_irq_postinstall(dev_priv);
}
4265
4266 /**
4267  * intel_irq_install - enables the hardware interrupt
4268  * @dev_priv: i915 device instance
4269  *
4270  * This function enables the hardware interrupt handling, but leaves the hotplug
4271  * handling still disabled. It is called after intel_irq_init().
4272  *
4273  * In the driver load and resume code we need working interrupts in a few places
4274  * but don't want to deal with the hassle of concurrent probe and hotplug
4275  * workers. Hence the split into this two-stage approach.
4276  */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->drm.irq_enabled = true;

	/* Quiesce the hardware before the handler can start running. */
	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		/* Roll back the flag set above; interrupts never armed. */
		dev_priv->drm.irq_enabled = false;
		return ret;
	}

	/* Unmask interrupt sources only once the handler is in place. */
	intel_irq_postinstall(dev_priv);

	return ret;
}
4304
4305 /**
4306  * intel_irq_uninstall - finalizes all irq handling
4307  * @dev_priv: i915 device instance
4308  *
4309  * This stops interrupt and hotplug handling and unregisters and frees all
4310  * resources acquired in the init functions.
4311  */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = dev_priv->drm.pdev->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->drm.irq_enabled)
		return;

	dev_priv->drm.irq_enabled = false;

	/* Mask/clear everything before tearing the handler down. */
	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	/* Stop any hotplug work still queued from earlier interrupts. */
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}
4334
4335 /**
4336  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4337  * @dev_priv: i915 device instance
4338  *
4339  * This function is used to disable interrupts at runtime, both in the runtime
4340  * pm and the system suspend/resume code.
4341  */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* Wait for any in-flight handler invocation to finish. */
	intel_synchronize_irq(dev_priv);
}
4348
4349 /**
4350  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4351  * @dev_priv: i915 device instance
4352  *
4353  * This function is used to enable interrupts at runtime, both in the runtime
4354  * pm and the system suspend/resume code.
4355  */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	/* Set the flag first; postinstall hooks assert on it. */
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}
4362
/*
 * Report whether driver interrupt handling is currently enabled; used
 * by the shared-line handlers to bail out early.
 */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	/*
	 * We only use drm_irq_uninstall() at unload and VT switch, so
	 * this is the only thing we need to check.
	 */
	return dev_priv->runtime_pm.irqs_enabled;
}
4371
/* Wait until any currently executing invocation of our handler returns. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(i915->drm.pdev->irq);
}