/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"
#include "display/intel_psr_regs.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistics for PMU. Increments the counter only if the
 * interrupt originated from the GPU, so that interrupts from a device
 * sharing the interrupt line are not counted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
                                 irqreturn_t res)
{
        if (unlikely(res != IRQ_HANDLED))
                return;

        /*
         * A clever compiler translates that into INC. A not so clever one
         * should at least prevent store tearing.
         */
        WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

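/* Per-platform hooks used by the hotplug (HPD) code below. */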
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);
typedef u32 (*hotplug_mask_func)(enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
        [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
        [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
        [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
        [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
        [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
        [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
        [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
        [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_xelpdp[HPD_NUM_PINS] = {
        [HPD_PORT_TC1] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC1) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC1),
        [HPD_PORT_TC2] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC2) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC2),
        [HPD_PORT_TC3] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC3) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC3),
        [HPD_PORT_TC4] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC4) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC4),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
        [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
        [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
        [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
        [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
        [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
        [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
        [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
        [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
        [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
        [HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};

static const u32 hpd_mtp[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
        [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
        [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
        [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
        [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
        [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
        struct intel_hotplug *hpd = &dev_priv->display.hotplug;

        if (HAS_GMCH(dev_priv)) {
                if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv))
                        hpd->hpd = hpd_status_g4x;
                else
                        hpd->hpd = hpd_status_i915;
                return;
        }

        if (DISPLAY_VER(dev_priv) >= 14)
                hpd->hpd = hpd_xelpdp;
        else if (DISPLAY_VER(dev_priv) >= 11)
                hpd->hpd = hpd_gen11;
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                hpd->hpd = hpd_bxt;
        else if (DISPLAY_VER(dev_priv) == 9)
                hpd->hpd = NULL; /* no north HPD on SKL */
        else if (DISPLAY_VER(dev_priv) >= 8)
                hpd->hpd = hpd_bdw;
        else if (DISPLAY_VER(dev_priv) >= 7)
                hpd->hpd = hpd_ivb;
        else
                hpd->hpd = hpd_ilk;

        if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
            (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
                return;

        if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
                hpd->pch_hpd = hpd_sde_dg1;
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
                hpd->pch_hpd = hpd_mtp;
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                hpd->pch_hpd = hpd_icp;
        else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
                hpd->pch_hpd = hpd_spt;
        else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
                hpd->pch_hpd = hpd_cpt;
        else if (HAS_PCH_IBX(dev_priv))
                hpd->pch_hpd = hpd_ibx;
        else
                MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

        drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
                    i915_reg_t iir, i915_reg_t ier)
{
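        /* Mask and disable all interrupts first, then flush out any stale IIR bits. */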
        intel_uncore_write(uncore, imr, 0xffffffff);
        intel_uncore_posting_read(uncore, imr);

        intel_uncore_write(uncore, ier, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
        intel_uncore_write(uncore, iir, 0xffffffff);
        intel_uncore_posting_read(uncore, iir);
}

static void gen2_irq_reset(struct intel_uncore *uncore)
{
        intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IMR);

        intel_uncore_write16(uncore, GEN2_IER, 0);

        /* IIR can theoretically queue up two events. Be paranoid. */
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
        u32 val = intel_uncore_read(uncore, reg);

        if (val == 0)
                return;

        drm_WARN(&uncore->i915->drm, 1,
                 "Interrupt register 0x%x is not zero: 0x%08x\n",
                 i915_mmio_reg_offset(reg), val);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
        intel_uncore_write(uncore, reg, 0xffffffff);
        intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
        u16 val = intel_uncore_read16(uncore, GEN2_IIR);

        if (val == 0)
                return;

        drm_WARN(&uncore->i915->drm, 1,
                 "Interrupt register 0x%x is not zero: 0x%08x\n",
                 i915_mmio_reg_offset(GEN2_IIR), val);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
        intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
        intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
                   i915_reg_t imr, u32 imr_val,
                   i915_reg_t ier, u32 ier_val,
                   i915_reg_t iir)
{
        gen3_assert_iir_is_zero(uncore, iir);

        intel_uncore_write(uncore, ier, ier_val);
        intel_uncore_write(uncore, imr, imr_val);
        intel_uncore_posting_read(uncore, imr);
}

static void gen2_irq_init(struct intel_uncore *uncore,
                          u32 imr_val, u32 ier_val)
{
        gen2_assert_iir_is_zero(uncore);

        intel_uncore_write16(uncore, GEN2_IER, ier_val);
        intel_uncore_write16(uncore, GEN2_IMR, imr_val);
        intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     u32 mask,
                                     u32 bits)
{
        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, bits & ~mask);

        intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN, mask, bits);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context, so the read-modify-write cycles are
 * protected by a spinlock to keep them from interfering. Since this
 * function is usually not called from a context where the lock is
 * already held, it acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   u32 mask,
                                   u32 bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}
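
/*
 * Illustrative example (not from the original source): enabling just the
 * CRT hotplug detect bit while leaving the other enable bits untouched
 * would look like
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 */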

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                                   u32 interrupt_mask, u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

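        /*
         * IMR is a disable mask: a set bit masks the interrupt off, so
         * enabling an interrupt here means clearing its bit. Bits outside
         * interrupt_mask are left untouched.
         */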
        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask &&
            !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
                dev_priv->irq_mask = new_val;
                intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
                intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
        }
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
        ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
        ilk_update_display_irq(i915, bits, 0);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;
        u32 old_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

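        /*
         * Unlike DEIMR above, the DE port IMR is not cached in dev_priv,
         * so read the current value back from the hardware.
         */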
        old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
                intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
        }
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
                                enum pipe pipe, u32 interrupt_mask,
                                u32 enabled_irq_mask)
{
        u32 new_val;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->de_irq_mask[pipe];
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->de_irq_mask[pipe]) {
                dev_priv->de_irq_mask[pipe] = new_val;
                intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
                intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
        }
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
                         enum pipe pipe, u32 bits)
{
        bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
                          enum pipe pipe, u32 bits)
{
        bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                         u32 interrupt_mask,
                                         u32 enabled_irq_mask)
{
        u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

        lockdep_assert_held(&dev_priv->irq_lock);

        if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
                return;

        intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
        intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
        ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
        ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
                              enum pipe pipe)
{
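        /*
         * PIPESTAT packs the interrupt enable bits in the upper 16 bits
         * and the corresponding status bits in the lower 16 bits, hence
         * the enable mask starting out as the status mask shifted up.
         */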
        u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
        u32 enable_mask = status_mask << 16;

        lockdep_assert_held(&dev_priv->irq_lock);

        if (DISPLAY_VER(dev_priv) < 5)
                goto out;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (drm_WARN_ON_ONCE(&dev_priv->drm,
                             status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (drm_WARN_ON_ONCE(&dev_priv->drm,
                             status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
        drm_WARN_ONCE(&dev_priv->drm,
                      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask);

        return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
                          enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: status_mask=0x%x\n",
                      pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

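        /*
         * Writing the status bits back also clears any that are pending,
         * as the PIPESTAT status bits are write-1-to-clear.
         */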
        intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
        intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
                           enum pipe pipe, u32 status_mask)
{
        i915_reg_t reg = PIPESTAT(pipe);
        u32 enable_mask;

        drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: status_mask=0x%x\n",
                      pipe_name(pipe), status_mask);

        lockdep_assert_held(&dev_priv->irq_lock);
        drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

        if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
        enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

        intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
        intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
        if (!dev_priv->display.opregion.asle)
                return false;

        return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
        if (!i915_has_asle(dev_priv))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (DISPLAY_VER(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), l3_parity.error_work);
        struct intel_gt *gt = to_gt(dev_priv);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        u32 misccpctl;
        u8 slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
                                     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
        intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                i915_reg_t reg;

                slice--;
                if (drm_WARN_ON_ONCE(&dev_priv->drm,
                                     slice >= NUM_L3_SLICES(dev_priv)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1(slice);

                error_status = intel_uncore_read(&dev_priv->uncore, reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                intel_uncore_posting_read(&dev_priv->uncore, reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                drm_dbg(&dev_priv->drm,
                        "Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                        slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
        drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
        spin_lock_irq(gt->irq_lock);
        gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(gt->irq_lock);

        mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_TC1:
        case HPD_PORT_TC2:
        case HPD_PORT_TC3:
        case HPD_PORT_TC4:
        case HPD_PORT_TC5:
        case HPD_PORT_TC6:
                return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
        case HPD_PORT_B:
        case HPD_PORT_C:
        case HPD_PORT_D:
                return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_TC1:
        case HPD_PORT_TC2:
        case HPD_PORT_TC3:
        case HPD_PORT_TC4:
        case HPD_PORT_TC5:
        case HPD_PORT_TC6:
                return val & ICP_TC_HPD_LONG_DETECT(pin);
        default:
                return false;
        }
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_E:
                return val & PORTE_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_A:
                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
        case HPD_PORT_D:
                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
        default:
                return false;
        }
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
                               u32 *pin_mask, u32 *long_mask,
                               u32 hotplug_trigger, u32 dig_hotplug_reg,
                               const u32 hpd[HPD_NUM_PINS],
                               bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
        enum hpd_pin pin;

        BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

        for_each_hpd_pin(pin) {
                if ((hpd[pin] & hotplug_trigger) == 0)
                        continue;

                *pin_mask |= BIT(pin);

                if (long_pulse_detect(pin, dig_hotplug_reg))
                        *long_mask |= BIT(pin);
        }

        drm_dbg(&dev_priv->drm,
                "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
                hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
                                  const u32 hpd[HPD_NUM_PINS])
{
        struct intel_encoder *encoder;
        u32 enabled_irqs = 0;

        for_each_intel_encoder(&dev_priv->drm, encoder)
                if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
                        enabled_irqs |= hpd[encoder->hpd_pin];

        return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
                                  const u32 hpd[HPD_NUM_PINS])
{
        struct intel_encoder *encoder;
        u32 hotplug_irqs = 0;

        for_each_intel_encoder(&dev_priv->drm, encoder)
                hotplug_irqs |= hpd[encoder->hpd_pin];

        return hotplug_irqs;
}

static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
                                  hotplug_mask_func hotplug_mask)
{
        enum hpd_pin pin;
        u32 hotplug = 0;

        for_each_hpd_pin(pin)
                hotplug |= hotplug_mask(pin);

        return hotplug;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
                                     hotplug_enables_func hotplug_enables)
{
        struct intel_encoder *encoder;
        u32 hotplug = 0;

        for_each_intel_encoder(&i915->drm, encoder)
                hotplug |= hotplug_enables(encoder);

        return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
        wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
        wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                         enum pipe pipe,
                                         u32 crc0, u32 crc1,
                                         u32 crc2, u32 crc3,
                                         u32 crc4)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
        struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
        u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

        trace_intel_pipe_crc(crtc, crcs);

        spin_lock(&pipe_crc->lock);
        /*
         * For some not yet identified reason, the first CRC is
         * bonkers. So let's just wait for the next vblank and read
         * out the buggy result.
         *
         * On GEN8+ sometimes the second CRC is bonkers as well, so
         * don't trust that one either.
         */
        if (pipe_crc->skipped <= 0 ||
            (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
                pipe_crc->skipped++;
                spin_unlock(&pipe_crc->lock);
                return;
        }
        spin_unlock(&pipe_crc->lock);

        drm_crtc_add_crc_entry(&crtc->base, true,
                                drm_crtc_accurate_vblank_count(&crtc->base),
                                crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                             enum pipe pipe,
                             u32 crc0, u32 crc1,
                             u32 crc2, u32 crc3,
                             u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
                              enum pipe pipe)
{
        struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
        struct drm_crtc_state *crtc_state = crtc->base.state;
        struct drm_pending_vblank_event *e = crtc_state->event;
        struct drm_device *dev = &i915->drm;
        unsigned long irqflags;

        spin_lock_irqsave(&dev->event_lock, irqflags);

        crtc_state->event = NULL;

        drm_crtc_send_vblank_event(&crtc->base, e);

        spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
{
        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        u32 res1, res2;

        if (DISPLAY_VER(dev_priv) >= 3)
                res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;

        if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
                res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;

        display_pipe_crc_irq_handler(dev_priv, pipe,
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
                                     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
                                     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe) {
                intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
                           PIPESTAT_INT_STATUS_MASK |
                           PIPE_FIFO_UNDERRUN_STATUS);

                dev_priv->pipestat_irq_mask[pipe] = 0;
        }
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
                                  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
        enum pipe pipe;

        spin_lock(&dev_priv->irq_lock);

        if (!dev_priv->display_irqs_enabled) {
                spin_unlock(&dev_priv->irq_lock);
                return;
        }

        for_each_pipe(dev_priv, pipe) {
                i915_reg_t reg;
                u32 status_mask, enable_mask, iir_bit = 0;

                /*
                 * PIPESTAT bits get signalled even when the interrupt is
                 * disabled with the mask bits, and some of the status bits do
                 * not generate interrupts at all (like the underrun bit). Hence
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */

                /* fifo underruns are filtered in the underrun handler. */
1078                 status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1079
1080                 switch (pipe) {
1081                 default:
1082                 case PIPE_A:
1083                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1084                         break;
1085                 case PIPE_B:
1086                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1087                         break;
1088                 case PIPE_C:
1089                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1090                         break;
1091                 }
1092                 if (iir & iir_bit)
1093                         status_mask |= dev_priv->pipestat_irq_mask[pipe];
1094
1095                 if (!status_mask)
1096                         continue;
1097
1098                 reg = PIPESTAT(pipe);
1099                 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
1100                 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1101
1102                 /*
1103                  * Clear the PIPE*STAT regs before the IIR
1104                  *
1105                  * Toggle the enable bits to make sure we get an
1106                  * edge in the ISR pipe event bit if we don't clear
1107                  * all the enabled status bits. Otherwise the edge
1108                  * triggered IIR on i965/g4x wouldn't notice that
1109                  * an interrupt is still pending.
1110                  */
1111                 if (pipe_stats[pipe]) {
1112                         intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
1113                         intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
1114                 }
1115         }
1116         spin_unlock(&dev_priv->irq_lock);
1117 }
1118
1119 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1120                                       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1121 {
1122         enum pipe pipe;
1123
1124         for_each_pipe(dev_priv, pipe) {
1125                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1126                         intel_handle_vblank(dev_priv, pipe);
1127
1128                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1129                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1130
1131                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1132                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1133         }
1134 }
1135
1136 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1137                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1138 {
1139         bool blc_event = false;
1140         enum pipe pipe;
1141
1142         for_each_pipe(dev_priv, pipe) {
1143                 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1144                         intel_handle_vblank(dev_priv, pipe);
1145
1146                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1147                         blc_event = true;
1148
1149                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1150                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1151
1152                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1153                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1154         }
1155
1156         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1157                 intel_opregion_asle_intr(dev_priv);
1158 }
1159
1160 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1161                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1162 {
1163         bool blc_event = false;
1164         enum pipe pipe;
1165
1166         for_each_pipe(dev_priv, pipe) {
1167                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1168                         intel_handle_vblank(dev_priv, pipe);
1169
1170                 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1171                         blc_event = true;
1172
1173                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1174                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1175
1176                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1177                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1178         }
1179
1180         if (blc_event || (iir & I915_ASLE_INTERRUPT))
1181                 intel_opregion_asle_intr(dev_priv);
1182
1183         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1184                 gmbus_irq_handler(dev_priv);
1185 }
1186
1187 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1188                                             u32 pipe_stats[I915_MAX_PIPES])
1189 {
1190         enum pipe pipe;
1191
1192         for_each_pipe(dev_priv, pipe) {
1193                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1194                         intel_handle_vblank(dev_priv, pipe);
1195
1196                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1197                         flip_done_handler(dev_priv, pipe);
1198
1199                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1200                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1201
1202                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1203                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1204         }
1205
1206         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1207                 gmbus_irq_handler(dev_priv);
1208 }
1209
1210 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1211 {
1212         u32 hotplug_status = 0, hotplug_status_mask;
1213         int i;
1214
1215         if (IS_G4X(dev_priv) ||
1216             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1217                 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1218                         DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1219         else
1220                 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1221
1222         /*
1223          * We absolutely have to clear all the pending interrupt
1224          * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1225          * interrupt bit won't have an edge, and the i965/g4x
1226          * edge triggered IIR will not notice that an interrupt
1227          * is still pending. We can't use PORT_HOTPLUG_EN to
1228          * guarantee the edge as the act of toggling the enable
1229          * bits can itself generate a new hotplug interrupt :(
1230          */
1231         for (i = 0; i < 10; i++) {
1232                 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1233
1234                 if (tmp == 0)
1235                         return hotplug_status;
1236
1237                 hotplug_status |= tmp;
1238                 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1239         }
1240
1241         drm_WARN_ONCE(&dev_priv->drm, 1,
1242                       "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1243                       intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1244
1245         return hotplug_status;
1246 }
1247
1248 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1249                                  u32 hotplug_status)
1250 {
1251         u32 pin_mask = 0, long_mask = 0;
1252         u32 hotplug_trigger;
1253
1254         if (IS_G4X(dev_priv) ||
1255             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1256                 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1257         else
1258                 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1259
1260         if (hotplug_trigger) {
1261                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1262                                    hotplug_trigger, hotplug_trigger,
1263                                    dev_priv->display.hotplug.hpd,
1264                                    i9xx_port_hotplug_long_detect);
1265
1266                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1267         }
1268
1269         if ((IS_G4X(dev_priv) ||
1270              IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1271             hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1272                 dp_aux_irq_handler(dev_priv);
1273 }
1274
1275 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1276 {
1277         struct drm_i915_private *dev_priv = arg;
1278         irqreturn_t ret = IRQ_NONE;
1279
1280         if (!intel_irqs_enabled(dev_priv))
1281                 return IRQ_NONE;
1282
1283         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1284         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1285
1286         do {
1287                 u32 iir, gt_iir, pm_iir;
1288                 u32 pipe_stats[I915_MAX_PIPES] = {};
1289                 u32 hotplug_status = 0;
1290                 u32 ier = 0;
1291
1292                 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1293                 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1294                 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1295
1296                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1297                         break;
1298
1299                 ret = IRQ_HANDLED;
1300
1301                 /*
1302                  * Theory on interrupt generation, based on empirical evidence:
1303                  *
1304                  * x = ((VLV_IIR & VLV_IER) ||
1305                  *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1306                  *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1307                  *
1308                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1309                  * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1310                  * guarantee the CPU interrupt will be raised again even if we
1311                  * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1312                  * bits this time around.
1313                  */
1314                 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1315                 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1316
1317                 if (gt_iir)
1318                         intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1319                 if (pm_iir)
1320                         intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1321
1322                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1323                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1324
1325                 /* Call regardless, as some status bits might not be
1326                  * signalled in iir */
1327                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1328
1329                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1330                            I915_LPE_PIPE_B_INTERRUPT))
1331                         intel_lpe_audio_irq_handler(dev_priv);
1332
1333                 /*
1334                  * VLV_IIR is single buffered, and reflects the level
1335                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1336                  */
1337                 if (iir)
1338                         intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1339
1340                 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1341                 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1342
1343                 if (gt_iir)
1344                         gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1345                 if (pm_iir)
1346                         gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1347
1348                 if (hotplug_status)
1349                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1350
1351                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1352         } while (0);
1353
1354         pmu_irq_stats(dev_priv, ret);
1355
1356         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1357
1358         return ret;
1359 }
1360
1361 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1362 {
1363         struct drm_i915_private *dev_priv = arg;
1364         irqreturn_t ret = IRQ_NONE;
1365
1366         if (!intel_irqs_enabled(dev_priv))
1367                 return IRQ_NONE;
1368
1369         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1370         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1371
1372         do {
1373                 u32 master_ctl, iir;
1374                 u32 pipe_stats[I915_MAX_PIPES] = {};
1375                 u32 hotplug_status = 0;
1376                 u32 ier = 0;
1377
1378                 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1379                 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1380
1381                 if (master_ctl == 0 && iir == 0)
1382                         break;
1383
1384                 ret = IRQ_HANDLED;
1385
1386                 /*
1387                  * Theory on interrupt generation, based on empirical evidence:
1388                  *
1389                  * x = ((VLV_IIR & VLV_IER) ||
1390                  *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1391                  *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1392                  *
1393                  * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1394                  * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1395                  * guarantee the CPU interrupt will be raised again even if we
1396                  * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1397                  * bits this time around.
1398                  */
1399                 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1400                 ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);
1401
1402                 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1403
1404                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1405                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1406
1407                 /* Call regardless, as some status bits might not be
1408                  * signalled in iir */
1409                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1410
1411                 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1412                            I915_LPE_PIPE_B_INTERRUPT |
1413                            I915_LPE_PIPE_C_INTERRUPT))
1414                         intel_lpe_audio_irq_handler(dev_priv);
1415
1416                 /*
1417                  * VLV_IIR is single buffered, and reflects the level
1418                  * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1419                  */
1420                 if (iir)
1421                         intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1422
1423                 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1424                 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1425
1426                 if (hotplug_status)
1427                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1428
1429                 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1430         } while (0);
1431
1432         pmu_irq_stats(dev_priv, ret);
1433
1434         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1435
1436         return ret;
1437 }
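
/*
 * Editorial sketch (hypothetical, not part of the driver build): the
 * re-arm dance shared by the VLV/CHV handlers above, reduced to its
 * ordering. The CPU interrupt is edge triggered, so the handler drops
 * the enables before acking and restores them afterwards; any level
 * still pending then produces a fresh 0->1 edge.
 */
#if 0
static irqreturn_t edge_rearm_sketch(void __iomem *regs)
{
        u32 ier;

        raw_reg_write(regs, GEN8_MASTER_IRQ, 0);        /* disarm master */
        ier = raw_reg_read(regs, VLV_IER);
        raw_reg_write(regs, VLV_IER, 0);                /* disarm unit enables */

        /* ... ack the individual IIR bits and handle them here ... */

        raw_reg_write(regs, VLV_IER, ier);              /* re-arm */
        raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

        return IRQ_HANDLED;
}
#endif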
1438
1439 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1440                                 u32 hotplug_trigger)
1441 {
1442         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1443
1444         /*
1445          * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1446          * unless we touch the hotplug register, even if hotplug_trigger is
1447          * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1448          * errors.
1449          */
1450         dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1451         if (!hotplug_trigger) {
1452                 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1453                         PORTD_HOTPLUG_STATUS_MASK |
1454                         PORTC_HOTPLUG_STATUS_MASK |
1455                         PORTB_HOTPLUG_STATUS_MASK;
1456                 dig_hotplug_reg &= ~mask;
1457         }
1458
1459         intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1460         if (!hotplug_trigger)
1461                 return;
1462
1463         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1464                            hotplug_trigger, dig_hotplug_reg,
1465                            dev_priv->display.hotplug.pch_hpd,
1466                            pch_port_hotplug_long_detect);
1467
1468         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1469 }
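
/*
 * Editorial sketch (hypothetical, not built): the shape of the
 * *_long_detect() callbacks handed to intel_get_hpd_pins() above. Each
 * callback tests the platform's "long pulse" status bit for one pin in
 * the latched hotplug register value; a long pulse signals plug/unplug,
 * a short pulse a sink-originated IRQ.
 */
#if 0
static bool example_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
        switch (pin) {
        case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
}
#endif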
1470
1471 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1472 {
1473         enum pipe pipe;
1474         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1475
1476         ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1477
1478         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1479                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1480                                SDE_AUDIO_POWER_SHIFT);
1481                 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1482                         port_name(port));
1483         }
1484
1485         if (pch_iir & SDE_AUX_MASK)
1486                 dp_aux_irq_handler(dev_priv);
1487
1488         if (pch_iir & SDE_GMBUS)
1489                 gmbus_irq_handler(dev_priv);
1490
1491         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1492                 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1493
1494         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1495                 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1496
1497         if (pch_iir & SDE_POISON)
1498                 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1499
1500         if (pch_iir & SDE_FDI_MASK) {
1501                 for_each_pipe(dev_priv, pipe)
1502                         drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1503                                 pipe_name(pipe),
1504                                 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1505         }
1506
1507         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1508                 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1509
1510         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1511                 drm_dbg(&dev_priv->drm,
1512                         "PCH transcoder CRC error interrupt\n");
1513
1514         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1515                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1516
1517         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1518                 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1519 }
1520
1521 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1522 {
1523         u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1524         enum pipe pipe;
1525
1526         if (err_int & ERR_INT_POISON)
1527                 drm_err(&dev_priv->drm, "Poison interrupt\n");
1528
1529         for_each_pipe(dev_priv, pipe) {
1530                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1531                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1532
1533                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1534                         if (IS_IVYBRIDGE(dev_priv))
1535                                 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1536                         else
1537                                 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1538                 }
1539         }
1540
1541         intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1542 }
1543
1544 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1545 {
1546         u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1547         enum pipe pipe;
1548
1549         if (serr_int & SERR_INT_POISON)
1550                 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1551
1552         for_each_pipe(dev_priv, pipe)
1553                 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1554                         intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1555
1556         intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1557 }
1558
1559 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1560 {
1561         enum pipe pipe;
1562         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1563
1564         ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1565
1566         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1567                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1568                                SDE_AUDIO_POWER_SHIFT_CPT);
1569                 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1570                         port_name(port));
1571         }
1572
1573         if (pch_iir & SDE_AUX_MASK_CPT)
1574                 dp_aux_irq_handler(dev_priv);
1575
1576         if (pch_iir & SDE_GMBUS_CPT)
1577                 gmbus_irq_handler(dev_priv);
1578
1579         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1580                 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1581
1582         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1583                 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1584
1585         if (pch_iir & SDE_FDI_MASK_CPT) {
1586                 for_each_pipe(dev_priv, pipe)
1587                         drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
1588                                 pipe_name(pipe),
1589                                 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1590         }
1591
1592         if (pch_iir & SDE_ERROR_CPT)
1593                 cpt_serr_int_handler(dev_priv);
1594 }
1595
1596 static void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
1597 {
1598         enum hpd_pin pin;
1599         u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
1600         u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
1601         u32 pin_mask = 0, long_mask = 0;
1602
1603         for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
1604                 u32 val;
1605
1606                 if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger))
1607                         continue;
1608
1609                 pin_mask |= BIT(pin);
1610
1611                 val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin));
1612                 intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val);
1613
1614                 if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT))
1615                         long_mask |= BIT(pin);
1616         }
1617
1618         if (pin_mask) {
1619                 drm_dbg(&i915->drm,
1620                         "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
1621                         hotplug_trigger, pin_mask, long_mask);
1622
1623                 intel_hpd_irq_handler(i915, pin_mask, long_mask);
1624         }
1625
1626         if (trigger_aux)
1627                 dp_aux_irq_handler(i915);
1628
1629         if (!pin_mask && !trigger_aux)
1630                 drm_err(&i915->drm,
1631                         "Unexpected DE HPD/AUX interrupt 0x%08x\n", iir);
1632 }
1633
1634 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1635 {
1636         u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1637         u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1638         u32 pin_mask = 0, long_mask = 0;
1639
1640         if (ddi_hotplug_trigger) {
1641                 u32 dig_hotplug_reg;
1642
1643                 /* Locking due to DSI native GPIO sequences */
1644                 spin_lock(&dev_priv->irq_lock);
1645                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
1646                 spin_unlock(&dev_priv->irq_lock);
1647
1648                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1649                                    ddi_hotplug_trigger, dig_hotplug_reg,
1650                                    dev_priv->display.hotplug.pch_hpd,
1651                                    icp_ddi_port_hotplug_long_detect);
1652         }
1653
1654         if (tc_hotplug_trigger) {
1655                 u32 dig_hotplug_reg;
1656
1657                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0);
1658
1659                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1660                                    tc_hotplug_trigger, dig_hotplug_reg,
1661                                    dev_priv->display.hotplug.pch_hpd,
1662                                    icp_tc_port_hotplug_long_detect);
1663         }
1664
1665         if (pin_mask)
1666                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1667
1668         if (pch_iir & SDE_GMBUS_ICP)
1669                 gmbus_irq_handler(dev_priv);
1670 }
1671
1672 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1673 {
1674         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1675                 ~SDE_PORTE_HOTPLUG_SPT;
1676         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1677         u32 pin_mask = 0, long_mask = 0;
1678
1679         if (hotplug_trigger) {
1680                 u32 dig_hotplug_reg;
1681
1682                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1683
1684                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1685                                    hotplug_trigger, dig_hotplug_reg,
1686                                    dev_priv->display.hotplug.pch_hpd,
1687                                    spt_port_hotplug_long_detect);
1688         }
1689
1690         if (hotplug2_trigger) {
1691                 u32 dig_hotplug_reg;
1692
1693                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0);
1694
1695                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1696                                    hotplug2_trigger, dig_hotplug_reg,
1697                                    dev_priv->display.hotplug.pch_hpd,
1698                                    spt_port_hotplug2_long_detect);
1699         }
1700
1701         if (pin_mask)
1702                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1703
1704         if (pch_iir & SDE_GMBUS_CPT)
1705                 gmbus_irq_handler(dev_priv);
1706 }
1707
1708 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1709                                 u32 hotplug_trigger)
1710 {
1711         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1712
1713         dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0);
1714
1715         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1716                            hotplug_trigger, dig_hotplug_reg,
1717                            dev_priv->display.hotplug.hpd,
1718                            ilk_port_hotplug_long_detect);
1719
1720         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1721 }
1722
1723 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
1724                                     u32 de_iir)
1725 {
1726         enum pipe pipe;
1727         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
1728
1729         if (hotplug_trigger)
1730                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1731
1732         if (de_iir & DE_AUX_CHANNEL_A)
1733                 dp_aux_irq_handler(dev_priv);
1734
1735         if (de_iir & DE_GSE)
1736                 intel_opregion_asle_intr(dev_priv);
1737
1738         if (de_iir & DE_POISON)
1739                 drm_err(&dev_priv->drm, "Poison interrupt\n");
1740
1741         for_each_pipe(dev_priv, pipe) {
1742                 if (de_iir & DE_PIPE_VBLANK(pipe))
1743                         intel_handle_vblank(dev_priv, pipe);
1744
1745                 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
1746                         flip_done_handler(dev_priv, pipe);
1747
1748                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1749                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1750
1751                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1752                         i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1753         }
1754
1755         /* check event from PCH */
1756         if (de_iir & DE_PCH_EVENT) {
1757                 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
1758
1759                 if (HAS_PCH_CPT(dev_priv))
1760                         cpt_irq_handler(dev_priv, pch_iir);
1761                 else
1762                         ibx_irq_handler(dev_priv, pch_iir);
1763
1764                 /* should clear PCH hotplug event before clearing CPU irq */
1765                 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1766         }
1767
1768         if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
1769                 gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
1770 }
1771
1772 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
1773                                     u32 de_iir)
1774 {
1775         enum pipe pipe;
1776         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
1777
1778         if (hotplug_trigger)
1779                 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
1780
1781         if (de_iir & DE_ERR_INT_IVB)
1782                 ivb_err_int_handler(dev_priv);
1783
1784         if (de_iir & DE_AUX_CHANNEL_A_IVB)
1785                 dp_aux_irq_handler(dev_priv);
1786
1787         if (de_iir & DE_GSE_IVB)
1788                 intel_opregion_asle_intr(dev_priv);
1789
1790         for_each_pipe(dev_priv, pipe) {
1791                 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
1792                         intel_handle_vblank(dev_priv, pipe);
1793
1794                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
1795                         flip_done_handler(dev_priv, pipe);
1796         }
1797
1798         /* check event from PCH */
1799         if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
1800                 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
1801
1802                 cpt_irq_handler(dev_priv, pch_iir);
1803
1804                 /* clear PCH hotplug event before clearing CPU irq */
1805                 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
1806         }
1807 }
1808
1809 /*
1810  * To handle irqs with the minimum potential races with fresh interrupts, we:
1811  * 1 - Disable Master Interrupt Control.
1812  * 2 - Find the source(s) of the interrupt.
1813  * 3 - Clear the Interrupt Identity bits (IIR).
1814  * 4 - Process the interrupt(s) that had bits set in the IIRs.
1815  * 5 - Re-enable Master Interrupt Control.
1816  */
1817 static irqreturn_t ilk_irq_handler(int irq, void *arg)
1818 {
1819         struct drm_i915_private *i915 = arg;
1820         void __iomem * const regs = i915->uncore.regs;
1821         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1822         irqreturn_t ret = IRQ_NONE;
1823
1824         if (unlikely(!intel_irqs_enabled(i915)))
1825                 return IRQ_NONE;
1826
1827         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1828         disable_rpm_wakeref_asserts(&i915->runtime_pm);
1829
1830         /* disable master interrupt before clearing iir  */
1831         de_ier = raw_reg_read(regs, DEIER);
1832         raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1833
1834         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1835          * interrupts will be stored on its back queue, and then we'll be
1836          * able to process them after we restore SDEIER (as soon as we restore
1837          * it, we'll get an interrupt if SDEIIR still has something to process
1838          * due to its back queue). */
1839         if (!HAS_PCH_NOP(i915)) {
1840                 sde_ier = raw_reg_read(regs, SDEIER);
1841                 raw_reg_write(regs, SDEIER, 0);
1842         }
1843
1844         /* Find, clear, then process each source of interrupt */
1845
1846         gt_iir = raw_reg_read(regs, GTIIR);
1847         if (gt_iir) {
1848                 raw_reg_write(regs, GTIIR, gt_iir);
1849                 if (GRAPHICS_VER(i915) >= 6)
1850                         gen6_gt_irq_handler(to_gt(i915), gt_iir);
1851                 else
1852                         gen5_gt_irq_handler(to_gt(i915), gt_iir);
1853                 ret = IRQ_HANDLED;
1854         }
1855
1856         de_iir = raw_reg_read(regs, DEIIR);
1857         if (de_iir) {
1858                 raw_reg_write(regs, DEIIR, de_iir);
1859                 if (DISPLAY_VER(i915) >= 7)
1860                         ivb_display_irq_handler(i915, de_iir);
1861                 else
1862                         ilk_display_irq_handler(i915, de_iir);
1863                 ret = IRQ_HANDLED;
1864         }
1865
1866         if (GRAPHICS_VER(i915) >= 6) {
1867                 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
1868                 if (pm_iir) {
1869                         raw_reg_write(regs, GEN6_PMIIR, pm_iir);
1870                         gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
1871                         ret = IRQ_HANDLED;
1872                 }
1873         }
1874
1875         raw_reg_write(regs, DEIER, de_ier);
1876         if (sde_ier)
1877                 raw_reg_write(regs, SDEIER, sde_ier);
1878
1879         pmu_irq_stats(i915, ret);
1880
1881         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1882         enable_rpm_wakeref_asserts(&i915->runtime_pm);
1883
1884         return ret;
1885 }
1886
1887 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
1888                                 u32 hotplug_trigger)
1889 {
1890         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1891
1892         dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0);
1893
1894         intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1895                            hotplug_trigger, dig_hotplug_reg,
1896                            dev_priv->display.hotplug.hpd,
1897                            bxt_port_hotplug_long_detect);
1898
1899         intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1900 }
1901
1902 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1903 {
1904         u32 pin_mask = 0, long_mask = 0;
1905         u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
1906         u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
1907
1908         if (trigger_tc) {
1909                 u32 dig_hotplug_reg;
1910
1911                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0);
1912
1913                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1914                                    trigger_tc, dig_hotplug_reg,
1915                                    dev_priv->display.hotplug.hpd,
1916                                    gen11_port_hotplug_long_detect);
1917         }
1918
1919         if (trigger_tbt) {
1920                 u32 dig_hotplug_reg;
1921
1922                 dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0);
1923
1924                 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1925                                    trigger_tbt, dig_hotplug_reg,
1926                                    dev_priv->display.hotplug.hpd,
1927                                    gen11_port_hotplug_long_detect);
1928         }
1929
1930         if (pin_mask)
1931                 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1932         else
1933                 drm_err(&dev_priv->drm,
1934                         "Unexpected DE HPD interrupt 0x%08x\n", iir);
1935 }
1936
1937 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
1938 {
1939         u32 mask;
1940
1941         if (DISPLAY_VER(dev_priv) >= 14)
1942                 return TGL_DE_PORT_AUX_DDIA |
1943                         TGL_DE_PORT_AUX_DDIB;
1944         else if (DISPLAY_VER(dev_priv) >= 13)
1945                 return TGL_DE_PORT_AUX_DDIA |
1946                         TGL_DE_PORT_AUX_DDIB |
1947                         TGL_DE_PORT_AUX_DDIC |
1948                         XELPD_DE_PORT_AUX_DDID |
1949                         XELPD_DE_PORT_AUX_DDIE |
1950                         TGL_DE_PORT_AUX_USBC1 |
1951                         TGL_DE_PORT_AUX_USBC2 |
1952                         TGL_DE_PORT_AUX_USBC3 |
1953                         TGL_DE_PORT_AUX_USBC4;
1954         else if (DISPLAY_VER(dev_priv) >= 12)
1955                 return TGL_DE_PORT_AUX_DDIA |
1956                         TGL_DE_PORT_AUX_DDIB |
1957                         TGL_DE_PORT_AUX_DDIC |
1958                         TGL_DE_PORT_AUX_USBC1 |
1959                         TGL_DE_PORT_AUX_USBC2 |
1960                         TGL_DE_PORT_AUX_USBC3 |
1961                         TGL_DE_PORT_AUX_USBC4 |
1962                         TGL_DE_PORT_AUX_USBC5 |
1963                         TGL_DE_PORT_AUX_USBC6;
1964
1965
1966         mask = GEN8_AUX_CHANNEL_A;
1967         if (DISPLAY_VER(dev_priv) >= 9)
1968                 mask |= GEN9_AUX_CHANNEL_B |
1969                         GEN9_AUX_CHANNEL_C |
1970                         GEN9_AUX_CHANNEL_D;
1971
1972         if (DISPLAY_VER(dev_priv) == 11) {
1973                 mask |= ICL_AUX_CHANNEL_F;
1974                 mask |= ICL_AUX_CHANNEL_E;
1975         }
1976
1977         return mask;
1978 }
1979
1980 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
1981 {
1982         if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
1983                 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
1984         else if (DISPLAY_VER(dev_priv) >= 11)
1985                 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
1986         else if (DISPLAY_VER(dev_priv) >= 9)
1987                 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
1988         else
1989                 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
1990 }
1991
1992 static void
1993 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
1994 {
1995         bool found = false;
1996
1997         if (iir & GEN8_DE_MISC_GSE) {
1998                 intel_opregion_asle_intr(dev_priv);
1999                 found = true;
2000         }
2001
2002         if (iir & GEN8_DE_EDP_PSR) {
2003                 struct intel_encoder *encoder;
2004                 u32 psr_iir;
2005                 i915_reg_t iir_reg;
2006
2007                 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2008                         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2009
2010                         if (DISPLAY_VER(dev_priv) >= 12)
2011                                 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2012                         else
2013                                 iir_reg = EDP_PSR_IIR;
2014
2015                         psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
2016
2017                         if (psr_iir)
2018                                 found = true;
2019
2020                         intel_psr_irq_handler(intel_dp, psr_iir);
2021
2022                         /* prior to GEN12 there is only one EDP PSR */
2023                         if (DISPLAY_VER(dev_priv) < 12)
2024                                 break;
2025                 }
2026         }
2027
2028         if (!found)
2029                 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2030 }
2031
2032 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2033                                            u32 te_trigger)
2034 {
2035         enum pipe pipe = INVALID_PIPE;
2036         enum transcoder dsi_trans;
2037         enum port port;
2038         u32 val, tmp;
2039
2040         /*
2041          * In case of dual link, TE comes from DSI_1;
2042          * this is to check if dual link is enabled
2043          */
2044         val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2045         val &= PORT_SYNC_MODE_ENABLE;
2046
2047         /*
2048          * if dual link is enabled, then read DSI_0
2049          * transcoder registers
2050          */
2051         port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2052                                                   PORT_A : PORT_B;
2053         dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2054
2055         /* Check if DSI configured in command mode */
2056         val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2057         val = val & OP_MODE_MASK;
2058
2059         if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2060                 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2061                 return;
2062         }
2063
2064         /* Get PIPE for handling VBLANK event */
2065         val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2066         switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2067         case TRANS_DDI_EDP_INPUT_A_ON:
2068                 pipe = PIPE_A;
2069                 break;
2070         case TRANS_DDI_EDP_INPUT_B_ONOFF:
2071                 pipe = PIPE_B;
2072                 break;
2073         case TRANS_DDI_EDP_INPUT_C_ONOFF:
2074                 pipe = PIPE_C;
2075                 break;
2076         default:
2077                 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2078                 return;
2079         }
2080
2081         intel_handle_vblank(dev_priv, pipe);
2082
2083         /* clear TE in dsi IIR */
2084         port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2085         tmp = intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2086 }
2087
2088 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2089 {
2090         if (DISPLAY_VER(i915) >= 9)
2091                 return GEN9_PIPE_PLANE1_FLIP_DONE;
2092         else
2093                 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2094 }
2095
2096 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2097 {
2098         u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2099
2100         if (DISPLAY_VER(dev_priv) >= 13)
2101                 mask |= XELPD_PIPE_SOFT_UNDERRUN |
2102                         XELPD_PIPE_HARD_UNDERRUN;
2103
2104         return mask;
2105 }
2106
2107 static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
2108 {
2109         u32 pica_ier = 0;
2110
2111         *pica_iir = 0;
2112         *pch_iir = intel_de_read(i915, SDEIIR);
2113         if (!*pch_iir)
2114                 return;
2115
2116         /*
2117          * PICA IER must be disabled/re-enabled around clearing PICA IIR and
2118          * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
2119          * their flags both in the PICA and SDE IIR.
2120          */
2121         if (*pch_iir & SDE_PICAINTERRUPT) {
2122                 drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);
2123
2124                 pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
2125                 *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
2126                 intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
2127         }
2128
2129         intel_de_write(i915, SDEIIR, *pch_iir);
2130
2131         if (pica_ier)
2132                 intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
2133 }
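
/*
 * Editorial sketch (hypothetical helper, not built): the IER gating used
 * above, reduced to its ordering. With PICAINTERRUPT_IER zeroed, an event
 * arriving between the two IIR writes re-latches in both registers once
 * IER is restored, instead of being acked in one and lost in the other.
 */
#if 0
static void gated_double_ack_sketch(struct drm_i915_private *i915,
                                    u32 pch_iir, u32 pica_iir)
{
        u32 ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0); /* gate */

        intel_de_write(i915, PICAINTERRUPT_IIR, pica_iir);     /* ack child */
        intel_de_write(i915, SDEIIR, pch_iir);                 /* ack parent */
        intel_de_write(i915, PICAINTERRUPT_IER, ier);          /* ungate */
}
#endif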
2134
2135 static irqreturn_t
2136 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2137 {
2138         irqreturn_t ret = IRQ_NONE;
2139         u32 iir;
2140         enum pipe pipe;
2141
2142         drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2143
2144         if (master_ctl & GEN8_DE_MISC_IRQ) {
2145                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2146                 if (iir) {
2147                         intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2148                         ret = IRQ_HANDLED;
2149                         gen8_de_misc_irq_handler(dev_priv, iir);
2150                 } else {
2151                         drm_err_ratelimited(&dev_priv->drm,
2152                                             "The master control interrupt lied (DE MISC)!\n");
2153                 }
2154         }
2155
2156         if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2157                 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2158                 if (iir) {
2159                         intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2160                         ret = IRQ_HANDLED;
2161                         gen11_hpd_irq_handler(dev_priv, iir);
2162                 } else {
2163                         drm_err_ratelimited(&dev_priv->drm,
2164                                             "The master control interrupt lied (DE HPD)!\n");
2165                 }
2166         }
2167
2168         if (master_ctl & GEN8_DE_PORT_IRQ) {
2169                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2170                 if (iir) {
2171                         bool found = false;
2172
2173                         intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2174                         ret = IRQ_HANDLED;
2175
2176                         if (iir & gen8_de_port_aux_mask(dev_priv)) {
2177                                 dp_aux_irq_handler(dev_priv);
2178                                 found = true;
2179                         }
2180
2181                         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2182                                 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2183
2184                                 if (hotplug_trigger) {
2185                                         bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2186                                         found = true;
2187                                 }
2188                         } else if (IS_BROADWELL(dev_priv)) {
2189                                 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2190
2191                                 if (hotplug_trigger) {
2192                                         ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2193                                         found = true;
2194                                 }
2195                         }
2196
2197                         if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2198                             (iir & BXT_DE_PORT_GMBUS)) {
2199                                 gmbus_irq_handler(dev_priv);
2200                                 found = true;
2201                         }
2202
2203                         if (DISPLAY_VER(dev_priv) >= 11) {
2204                                 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2205
2206                                 if (te_trigger) {
2207                                         gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2208                                         found = true;
2209                                 }
2210                         }
2211
2212                         if (!found)
2213                                 drm_err_ratelimited(&dev_priv->drm,
2214                                                     "Unexpected DE Port interrupt\n");
2215                 }
2216                 else
2217                         drm_err_ratelimited(&dev_priv->drm,
2218                                             "The master control interrupt lied (DE PORT)!\n");
2219         }
2220
2221         for_each_pipe(dev_priv, pipe) {
2222                 u32 fault_errors;
2223
2224                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2225                         continue;
2226
2227                 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2228                 if (!iir) {
2229                         drm_err_ratelimited(&dev_priv->drm,
2230                                             "The master control interrupt lied (DE PIPE)!\n");
2231                         continue;
2232                 }
2233
2234                 ret = IRQ_HANDLED;
2235                 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2236
2237                 if (iir & GEN8_PIPE_VBLANK)
2238                         intel_handle_vblank(dev_priv, pipe);
2239
2240                 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2241                         flip_done_handler(dev_priv, pipe);
2242
2243                 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2244                         hsw_pipe_crc_irq_handler(dev_priv, pipe);
2245
2246                 if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2247                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2248
2249                 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2250                 if (fault_errors)
2251                         drm_err_ratelimited(&dev_priv->drm,
2252                                             "Fault errors on pipe %c: 0x%08x\n",
2253                                             pipe_name(pipe),
2254                                             fault_errors);
2255         }
2256
2257         if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2258             master_ctl & GEN8_DE_PCH_IRQ) {
2259                 u32 pica_iir;
2260
2261                 /*
2262                  * FIXME(BDW): Assume for now that the new interrupt handling
2263                  * scheme also closed the SDE interrupt handling race we've seen
2264                  * on older pch-split platforms. But this needs testing.
2265                  */
2266                 gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
2267                 if (iir) {
2268                         ret = IRQ_HANDLED;
2269
2270                         if (pica_iir)
2271                                 xelpdp_pica_irq_handler(dev_priv, pica_iir);
2272
2273                         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2274                                 icp_irq_handler(dev_priv, iir);
2275                         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2276                                 spt_irq_handler(dev_priv, iir);
2277                         else
2278                                 cpt_irq_handler(dev_priv, iir);
2279                 } else {
2280                         /*
2281                          * Like on previous PCH there seems to be something
2282                          * fishy going on with forwarding PCH interrupts.
2283                          */
2284                         drm_dbg(&dev_priv->drm,
2285                                 "The master control interrupt lied (SDE)!\n");
2286                 }
2287         }
2288
2289         return ret;
2290 }
2291
2292 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2293 {
2294         raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2295
2296         /*
2297          * Now with master disabled, get a sample of level indications
2298          * for this interrupt. Indications will be cleared on related acks.
2299          * New indications can and will light up during processing,
2300          * and will generate new interrupt after enabling master.
2301          * and will generate a new interrupt after enabling master.
2302         return raw_reg_read(regs, GEN8_MASTER_IRQ);
2303 }
2304
2305 static inline void gen8_master_intr_enable(void __iomem * const regs)
2306 {
2307         raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2308 }
2309
2310 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2311 {
2312         struct drm_i915_private *dev_priv = arg;
2313         void __iomem * const regs = dev_priv->uncore.regs;
2314         u32 master_ctl;
2315
2316         if (!intel_irqs_enabled(dev_priv))
2317                 return IRQ_NONE;
2318
2319         master_ctl = gen8_master_intr_disable(regs);
2320         if (!master_ctl) {
2321                 gen8_master_intr_enable(regs);
2322                 return IRQ_NONE;
2323         }
2324
2325         /* Find, queue (onto bottom-halves), then clear each source */
2326         gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2327
2328         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2329         if (master_ctl & ~GEN8_GT_IRQS) {
2330                 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2331                 gen8_de_irq_handler(dev_priv, master_ctl);
2332                 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2333         }
2334
2335         gen8_master_intr_enable(regs);
2336
2337         pmu_irq_stats(dev_priv, IRQ_HANDLED);
2338
2339         return IRQ_HANDLED;
2340 }
2341
2342 static u32
2343 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2344 {
2345         void __iomem * const regs = i915->uncore.regs;
2346         u32 iir;
2347
2348         if (!(master_ctl & GEN11_GU_MISC_IRQ))
2349                 return 0;
2350
2351         iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2352         if (likely(iir))
2353                 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2354
2355         return iir;
2356 }
2357
2358 static void
2359 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2360 {
2361         if (iir & GEN11_GU_MISC_GSE)
2362                 intel_opregion_asle_intr(i915);
2363 }
2364
2365 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2366 {
2367         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2368
2369         /*
2370          * Now with master disabled, get a sample of level indications
2371          * for this interrupt. Indications will be cleared on related acks.
2372          * New indications can and will light up during processing,
2373          * and will generate new interrupt after enabling master.
2374          * and will generate a new interrupt after enabling master.
2375         return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2376 }
2377
2378 static inline void gen11_master_intr_enable(void __iomem * const regs)
2379 {
2380         raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2381 }
2382
2383 static void
2384 gen11_display_irq_handler(struct drm_i915_private *i915)
2385 {
2386         void __iomem * const regs = i915->uncore.regs;
2387         const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2388
2389         disable_rpm_wakeref_asserts(&i915->runtime_pm);
2390         /*
2391          * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2392          * for the display related bits.
2393          */
2394         raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2395         gen8_de_irq_handler(i915, disp_ctl);
2396         raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2397                       GEN11_DISPLAY_IRQ_ENABLE);
2398
2399         enable_rpm_wakeref_asserts(&i915->runtime_pm);
2400 }
2401
2402 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2403 {
2404         struct drm_i915_private *i915 = arg;
2405         void __iomem * const regs = i915->uncore.regs;
2406         struct intel_gt *gt = to_gt(i915);
2407         u32 master_ctl;
2408         u32 gu_misc_iir;
2409
2410         if (!intel_irqs_enabled(i915))
2411                 return IRQ_NONE;
2412
2413         master_ctl = gen11_master_intr_disable(regs);
2414         if (!master_ctl) {
2415                 gen11_master_intr_enable(regs);
2416                 return IRQ_NONE;
2417         }
2418
2419         /* Find, queue (onto bottom-halves), then clear each source */
2420         gen11_gt_irq_handler(gt, master_ctl);
2421
2422         /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2423         if (master_ctl & GEN11_DISPLAY_IRQ)
2424                 gen11_display_irq_handler(i915);
2425
2426         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2427
2428         gen11_master_intr_enable(regs);
2429
2430         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2431
2432         pmu_irq_stats(i915, IRQ_HANDLED);
2433
2434         return IRQ_HANDLED;
2435 }
2436
2437 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2438 {
2439         u32 val;
2440
2441         /* First disable interrupts */
2442         raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2443
2444         /* Get the indication levels and ack the master unit */
2445         val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2446         if (unlikely(!val))
2447                 return 0;
2448
2449         raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2450
2451         return val;
2452 }
2453
2454 static inline void dg1_master_intr_enable(void __iomem * const regs)
2455 {
2456         raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2457 }
2458
2459 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2460 {
2461         struct drm_i915_private * const i915 = arg;
2462         struct intel_gt *gt = to_gt(i915);
2463         void __iomem * const regs = gt->uncore->regs;
2464         u32 master_tile_ctl, master_ctl;
2465         u32 gu_misc_iir;
2466
2467         if (!intel_irqs_enabled(i915))
2468                 return IRQ_NONE;
2469
2470         master_tile_ctl = dg1_master_intr_disable(regs);
2471         if (!master_tile_ctl) {
2472                 dg1_master_intr_enable(regs);
2473                 return IRQ_NONE;
2474         }
2475
2476         /* FIXME: we only support tile 0 for now. */
2477         if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2478                 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2479                 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2480         } else {
2481                 drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
2482                         master_tile_ctl);
2483                 dg1_master_intr_enable(regs);
2484                 return IRQ_NONE;
2485         }
2486
2487         gen11_gt_irq_handler(gt, master_ctl);
2488
2489         if (master_ctl & GEN11_DISPLAY_IRQ)
2490                 gen11_display_irq_handler(i915);
2491
2492         gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2493
2494         dg1_master_intr_enable(regs);
2495
2496         gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2497
2498         pmu_irq_stats(i915, IRQ_HANDLED);
2499
2500         return IRQ_HANDLED;
2501 }
2502
2503 /* Called from drm generic code, passed 'crtc' which
2504  * we use as a pipe index
2505  */
2506 int i8xx_enable_vblank(struct drm_crtc *crtc)
2507 {
2508         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2509         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2510         unsigned long irqflags;
2511
2512         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2513         i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2514         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2515
2516         return 0;
2517 }
2518
2519 int i915gm_enable_vblank(struct drm_crtc *crtc)
2520 {
2521         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2522
2523         /*
2524          * Vblank interrupts fail to wake the device up from C2+.
2525          * Disabling render clock gating during C-states avoids
2526          * the problem. There is a small power cost so we do this
2527          * only when vblank interrupts are actually enabled.
2528          */
2529         if (dev_priv->vblank_enabled++ == 0)
2530                 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2531
2532         return i8xx_enable_vblank(crtc);
2533 }
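
/*
 * Editorial note: SCPD0 is a "masked" register -- the upper 16 bits of a
 * write select which of the lower 16 bits actually change, so the update
 * above needs no read-modify-write and leaves unrelated bits alone. A
 * hand-rolled equivalent of the helpers (illustrative sketch only, not
 * the real macro definitions):
 */
#if 0
static u32 masked_bit_enable_sketch(u32 bit)
{
        return bit << 16 | bit;         /* write-enable + set */
}

static u32 masked_bit_disable_sketch(u32 bit)
{
        return bit << 16;               /* write-enable + clear */
}
#endif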
2534
2535 int i965_enable_vblank(struct drm_crtc *crtc)
2536 {
2537         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2538         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2539         unsigned long irqflags;
2540
2541         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2542         i915_enable_pipestat(dev_priv, pipe,
2543                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2544         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2545
2546         return 0;
2547 }
2548
2549 int ilk_enable_vblank(struct drm_crtc *crtc)
2550 {
2551         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2552         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2553         unsigned long irqflags;
2554         u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2555                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2556
2557         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2558         ilk_enable_display_irq(dev_priv, bit);
2559         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2560
2561         /* Even though there is no DMC, frame counter can get stuck when
2562          * PSR is active as no frames are generated.
2563          */
2564         if (HAS_PSR(dev_priv))
2565                 drm_crtc_vblank_restore(crtc);
2566
2567         return 0;
2568 }
2569
2570 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2571                                    bool enable)
2572 {
2573         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2574         enum port port;
2575
2576         if (!(intel_crtc->mode_flags &
2577             (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2578                 return false;
2579
2580         /* for dual link cases we consider TE from slave */
2581         if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2582                 port = PORT_B;
2583         else
2584                 port = PORT_A;
2585
2586         intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
2587                          enable ? 0 : DSI_TE_EVENT);
2588
2589         intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
2590
2591         return true;
2592 }
2593
2594 int bdw_enable_vblank(struct drm_crtc *_crtc)
2595 {
2596         struct intel_crtc *crtc = to_intel_crtc(_crtc);
2597         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2598         enum pipe pipe = crtc->pipe;
2599         unsigned long irqflags;
2600
2601         if (gen11_dsi_configure_te(crtc, true))
2602                 return 0;
2603
2604         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2605         bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2606         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2607
2608         /* Even if there is no DMC, frame counter can get stuck when
2609          * PSR is active as no frames are generated, so check only for PSR.
2610          */
2611         if (HAS_PSR(dev_priv))
2612                 drm_crtc_vblank_restore(&crtc->base);
2613
2614         return 0;
2615 }
2616
2617 /* Called from drm generic code, passed 'crtc' which
2618  * we use as a pipe index
2619  */
2620 void i8xx_disable_vblank(struct drm_crtc *crtc)
2621 {
2622         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2623         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2624         unsigned long irqflags;
2625
2626         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2627         i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2628         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2629 }
2630
2631 void i915gm_disable_vblank(struct drm_crtc *crtc)
2632 {
2633         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2634
2635         i8xx_disable_vblank(crtc);
2636
2637         if (--dev_priv->vblank_enabled == 0)
2638                 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2639 }
2640
2641 void i965_disable_vblank(struct drm_crtc *crtc)
2642 {
2643         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2644         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2645         unsigned long irqflags;
2646
2647         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2648         i915_disable_pipestat(dev_priv, pipe,
2649                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2650         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2651 }
2652
2653 void ilk_disable_vblank(struct drm_crtc *crtc)
2654 {
2655         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2656         enum pipe pipe = to_intel_crtc(crtc)->pipe;
2657         unsigned long irqflags;
2658         u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2659                 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2660
2661         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2662         ilk_disable_display_irq(dev_priv, bit);
2663         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2664 }
2665
2666 void bdw_disable_vblank(struct drm_crtc *_crtc)
2667 {
2668         struct intel_crtc *crtc = to_intel_crtc(_crtc);
2669         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2670         enum pipe pipe = crtc->pipe;
2671         unsigned long irqflags;
2672
2673         if (gen11_dsi_configure_te(crtc, false))
2674                 return;
2675
2676         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2677         bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2678         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2679 }
2680
2681 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2682 {
2683         struct intel_uncore *uncore = &dev_priv->uncore;
2684
2685         if (HAS_PCH_NOP(dev_priv))
2686                 return;
2687
2688         GEN3_IRQ_RESET(uncore, SDE);
2689
2690         if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2691                 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
2692 }
2693
2694 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2695 {
2696         struct intel_uncore *uncore = &dev_priv->uncore;
2697
2698         if (IS_CHERRYVIEW(dev_priv))
2699                 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2700         else
2701                 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
2702
2703         i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2704         intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
2705
2706         i9xx_pipestat_irq_reset(dev_priv);
2707
2708         GEN3_IRQ_RESET(uncore, VLV_);
2709         dev_priv->irq_mask = ~0u;
2710 }
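
/*
 * Editorial sketch (hypothetical, not built): the discipline behind the
 * GEN3_IRQ_RESET() invocations in this file for an IMR/IER/IIR triplet --
 * mask and disable delivery first, then ack IIR twice, since it can queue
 * up a second event while the first is being cleared. Posting reads are
 * omitted here for brevity.
 */
#if 0
static void irq_triplet_reset_sketch(struct intel_uncore *uncore,
                                     i915_reg_t imr, i915_reg_t ier,
                                     i915_reg_t iir)
{
        intel_uncore_write(uncore, imr, 0xffffffff);    /* mask everything */
        intel_uncore_write(uncore, ier, 0);             /* disable delivery */
        intel_uncore_write(uncore, iir, 0xffffffff);    /* ack pending */
        intel_uncore_write(uncore, iir, 0xffffffff);    /* ack any queued */
}
#endif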
2711
2712 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2713 {
2714         struct intel_uncore *uncore = &dev_priv->uncore;
2715
2716         u32 pipestat_mask;
2717         u32 enable_mask;
2718         enum pipe pipe;
2719
2720         pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2721
2722         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2723         for_each_pipe(dev_priv, pipe)
2724                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2725
2726         enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2727                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2728                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2729                 I915_LPE_PIPE_A_INTERRUPT |
2730                 I915_LPE_PIPE_B_INTERRUPT;
2731
2732         if (IS_CHERRYVIEW(dev_priv))
2733                 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2734                         I915_LPE_PIPE_C_INTERRUPT;
2735
2736         drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
2737
2738         dev_priv->irq_mask = ~enable_mask;
2739
2740         GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2741 }
2742
2743 /* drm_dma.h hooks */
2744
2745 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2746 {
2747         struct intel_uncore *uncore = &dev_priv->uncore;
2748
2749         GEN3_IRQ_RESET(uncore, DE);
2750         dev_priv->irq_mask = ~0u;
2751
2752         if (GRAPHICS_VER(dev_priv) == 7)
2753                 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2754
2755         if (IS_HASWELL(dev_priv)) {
2756                 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2757                 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2758         }
2759
2760         gen5_gt_irq_reset(to_gt(dev_priv));
2761
2762         ibx_irq_reset(dev_priv);
2763 }
2764
2765 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
2766 {
2767         intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
2768         intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
2769
2770         gen5_gt_irq_reset(to_gt(dev_priv));
2771
2772         spin_lock_irq(&dev_priv->irq_lock);
2773         if (dev_priv->display_irqs_enabled)
2774                 vlv_display_irq_reset(dev_priv);
2775         spin_unlock_irq(&dev_priv->irq_lock);
2776 }
2777
2778 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
2779 {
2780         struct intel_uncore *uncore = &dev_priv->uncore;
2781         enum pipe pipe;
2782
2783         if (!HAS_DISPLAY(dev_priv))
2784                 return;
2785
2786         intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2787         intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2788
2789         for_each_pipe(dev_priv, pipe)
2790                 if (intel_display_power_is_enabled(dev_priv,
2791                                                    POWER_DOMAIN_PIPE(pipe)))
2792                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2793
2794         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2795         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2796 }
2797
2798 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2799 {
2800         struct intel_uncore *uncore = &dev_priv->uncore;
2801
2802         gen8_master_intr_disable(uncore->regs);
2803
2804         gen8_gt_irq_reset(to_gt(dev_priv));
2805         gen8_display_irq_reset(dev_priv);
2806         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2807
2808         if (HAS_PCH_SPLIT(dev_priv))
2809                 ibx_irq_reset(dev_priv);
2810
2811 }
2812
2813 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
2814 {
2815         struct intel_uncore *uncore = &dev_priv->uncore;
2816         enum pipe pipe;
2817         u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
2818                 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
2819
2820         if (!HAS_DISPLAY(dev_priv))
2821                 return;
2822
2823         intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
2824
2825         if (DISPLAY_VER(dev_priv) >= 12) {
2826                 enum transcoder trans;
2827
2828                 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
2829                         enum intel_display_power_domain domain;
2830
2831                         domain = POWER_DOMAIN_TRANSCODER(trans);
2832                         if (!intel_display_power_is_enabled(dev_priv, domain))
2833                                 continue;
2834
2835                         intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2836                         intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2837                 }
2838         } else {
2839                 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2840                 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2841         }
2842
2843         for_each_pipe(dev_priv, pipe)
2844                 if (intel_display_power_is_enabled(dev_priv,
2845                                                    POWER_DOMAIN_PIPE(pipe)))
2846                         GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2847
2848         GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2849         GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2850
2851         if (DISPLAY_VER(dev_priv) >= 14)
2852                 GEN3_IRQ_RESET(uncore, PICAINTERRUPT_);
2853         else
2854                 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2855
2856         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2857                 GEN3_IRQ_RESET(uncore, SDE);
2858 }
2859
2860 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2861 {
2862         struct intel_gt *gt = to_gt(dev_priv);
2863         struct intel_uncore *uncore = gt->uncore;
2864
2865         gen11_master_intr_disable(dev_priv->uncore.regs);
2866
2867         gen11_gt_irq_reset(gt);
2868         gen11_display_irq_reset(dev_priv);
2869
2870         GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2871         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2872 }
2873
2874 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
2875 {
2876         struct intel_gt *gt = to_gt(dev_priv);
2877         struct intel_uncore *uncore = gt->uncore;
2878
2879         dg1_master_intr_disable(dev_priv->uncore.regs);
2880
2881         gen11_gt_irq_reset(gt);
2882         gen11_display_irq_reset(dev_priv);
2883
2884         GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2885         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2886 }
2887
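/*
 * Reprogram the DE pipe interrupt registers for the pipes in @pipe_mask
 * after their power well has come back up, since the IMR/IER contents
 * programmed at postinstall time do not survive the well being powered
 * down. Paired with gen8_irq_power_well_pre_disable() below.
 */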
2888 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2889                                      u8 pipe_mask)
2890 {
2891         struct intel_uncore *uncore = &dev_priv->uncore;
2892         u32 extra_ier = GEN8_PIPE_VBLANK |
2893                 gen8_de_pipe_underrun_mask(dev_priv) |
2894                 gen8_de_pipe_flip_done_mask(dev_priv);
2895         enum pipe pipe;
2896
2897         spin_lock_irq(&dev_priv->irq_lock);
2898
2899         if (!intel_irqs_enabled(dev_priv)) {
2900                 spin_unlock_irq(&dev_priv->irq_lock);
2901                 return;
2902         }
2903
2904         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2905                 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
2906                                   dev_priv->de_irq_mask[pipe],
2907                                   ~dev_priv->de_irq_mask[pipe] | extra_ier);
2908
2909         spin_unlock_irq(&dev_priv->irq_lock);
2910 }
2911
2912 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
2913                                      u8 pipe_mask)
2914 {
2915         struct intel_uncore *uncore = &dev_priv->uncore;
2916         enum pipe pipe;
2917
2918         spin_lock_irq(&dev_priv->irq_lock);
2919
2920         if (!intel_irqs_enabled(dev_priv)) {
2921                 spin_unlock_irq(&dev_priv->irq_lock);
2922                 return;
2923         }
2924
2925         for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2926                 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2927
2928         spin_unlock_irq(&dev_priv->irq_lock);
2929
2930         /* make sure we're done processing display irqs */
2931         intel_synchronize_irq(dev_priv);
2932 }
2933
2934 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
2935 {
2936         struct intel_uncore *uncore = &dev_priv->uncore;
2937
2938         intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(uncore, GEN8_MASTER_IRQ);
2940
2941         gen8_gt_irq_reset(to_gt(dev_priv));
2942
2943         GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2944
2945         spin_lock_irq(&dev_priv->irq_lock);
2946         if (dev_priv->display_irqs_enabled)
2947                 vlv_display_irq_reset(dev_priv);
2948         spin_unlock_irq(&dev_priv->irq_lock);
2949 }
2950
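/*
 * Each platform below supplies a <platform>_hotplug_mask() /
 * <platform>_hotplug_enables() pair: the former returns every control
 * bit owned by a given HPD pin, the latter the subset that should
 * actually be set for a particular encoder. A read-modify-write with
 * (mask, enables) then reprograms one pin's detection logic without
 * disturbing the other pins sharing the register.
 */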
2951 static u32 ibx_hotplug_mask(enum hpd_pin hpd_pin)
2952 {
2953         switch (hpd_pin) {
2954         case HPD_PORT_A:
2955                 return PORTA_HOTPLUG_ENABLE;
2956         case HPD_PORT_B:
2957                 return PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_MASK;
2958         case HPD_PORT_C:
2959                 return PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_MASK;
2960         case HPD_PORT_D:
2961                 return PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_MASK;
2962         default:
2963                 return 0;
2964         }
2965 }
2966
2967 static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
2968 {
2969         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2970
2971         switch (encoder->hpd_pin) {
2972         case HPD_PORT_A:
2973                 /*
2974                  * When CPU and PCH are on the same package, port A
2975                  * HPD must be enabled in both north and south.
2976                  */
2977                 return HAS_PCH_LPT_LP(i915) ?
2978                         PORTA_HOTPLUG_ENABLE : 0;
2979         case HPD_PORT_B:
2980                 return PORTB_HOTPLUG_ENABLE |
2981                         PORTB_PULSE_DURATION_2ms;
2982         case HPD_PORT_C:
2983                 return PORTC_HOTPLUG_ENABLE |
2984                         PORTC_PULSE_DURATION_2ms;
2985         case HPD_PORT_D:
2986                 return PORTD_HOTPLUG_ENABLE |
2987                         PORTD_PULSE_DURATION_2ms;
2988         default:
2989                 return 0;
2990         }
2991 }
2992
2993 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
2994 {
2995         /*
2996          * Enable digital hotplug on the PCH, and configure the DP short pulse
2997          * duration to 2ms (which is the minimum in the Display Port spec).
2998          * The pulse duration bits are reserved on LPT+.
2999          */
3000         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3001                          intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask),
3002                          intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
3003 }
3004
3005 static void ibx_hpd_enable_detection(struct intel_encoder *encoder)
3006 {
3007         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3008
3009         intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
3010                          ibx_hotplug_mask(encoder->hpd_pin),
3011                          ibx_hotplug_enables(encoder));
3012 }
3013
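/*
 * The *_hpd_irq_setup() flavours all follow the same shape: work out
 * which of the platform's HPD interrupt bits actually have an encoder
 * behind them (enabled_irqs, a subset of hotplug_irqs), unmask exactly
 * those, and then program the per-pin detection hardware.
 */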
3014 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3015 {
3016         u32 hotplug_irqs, enabled_irqs;
3017
3018         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3019         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3020
3021         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3022
3023         ibx_hpd_detection_setup(dev_priv);
3024 }
3025
3026 static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
3027 {
3028         switch (hpd_pin) {
3029         case HPD_PORT_A:
3030         case HPD_PORT_B:
3031         case HPD_PORT_C:
3032         case HPD_PORT_D:
3033                 return SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin);
3034         default:
3035                 return 0;
3036         }
3037 }
3038
3039 static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
3040 {
3041         return icp_ddi_hotplug_mask(encoder->hpd_pin);
3042 }
3043
3044 static u32 icp_tc_hotplug_mask(enum hpd_pin hpd_pin)
3045 {
3046         switch (hpd_pin) {
3047         case HPD_PORT_TC1:
3048         case HPD_PORT_TC2:
3049         case HPD_PORT_TC3:
3050         case HPD_PORT_TC4:
3051         case HPD_PORT_TC5:
3052         case HPD_PORT_TC6:
3053                 return ICP_TC_HPD_ENABLE(hpd_pin);
3054         default:
3055                 return 0;
3056         }
3057 }
3058
3059 static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
3060 {
3061         return icp_tc_hotplug_mask(encoder->hpd_pin);
3062 }
3063
3064 static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
3065 {
3066         intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
3067                          intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask),
3068                          intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
3069 }
3070
3071 static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
3072 {
3073         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3074
3075         intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI,
3076                          icp_ddi_hotplug_mask(encoder->hpd_pin),
3077                          icp_ddi_hotplug_enables(encoder));
3078 }
3079
3080 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3081 {
3082         intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
3083                          intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask),
3084                          intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
3085 }
3086
3087 static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder)
3088 {
3089         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3090
3091         intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC,
3092                          icp_tc_hotplug_mask(encoder->hpd_pin),
3093                          icp_tc_hotplug_enables(encoder));
3094 }
3095
3096 static void icp_hpd_enable_detection(struct intel_encoder *encoder)
3097 {
3098         icp_ddi_hpd_enable_detection(encoder);
3099         icp_tc_hpd_enable_detection(encoder);
3100 }
3101
3102 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3103 {
3104         u32 hotplug_irqs, enabled_irqs;
3105
3106         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3107         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3108
3109         if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
3110                 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3111
3112         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3113
3114         icp_ddi_hpd_detection_setup(dev_priv);
3115         icp_tc_hpd_detection_setup(dev_priv);
3116 }
3117
3118 static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin)
3119 {
3120         switch (hpd_pin) {
3121         case HPD_PORT_TC1:
3122         case HPD_PORT_TC2:
3123         case HPD_PORT_TC3:
3124         case HPD_PORT_TC4:
3125         case HPD_PORT_TC5:
3126         case HPD_PORT_TC6:
3127                 return GEN11_HOTPLUG_CTL_ENABLE(hpd_pin);
3128         default:
3129                 return 0;
3130         }
3131 }
3132
3133 static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
3134 {
3135         return gen11_hotplug_mask(encoder->hpd_pin);
3136 }
3137
3138 static void dg1_hpd_invert(struct drm_i915_private *i915)
3139 {
3140         u32 val = (INVERT_DDIA_HPD |
3141                    INVERT_DDIB_HPD |
3142                    INVERT_DDIC_HPD |
3143                    INVERT_DDID_HPD);
3144         intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
3145 }
3146
3147 static void dg1_hpd_enable_detection(struct intel_encoder *encoder)
3148 {
3149         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3150
3151         dg1_hpd_invert(i915);
3152         icp_hpd_enable_detection(encoder);
3153 }
3154
3155 static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
3156 {
3157         dg1_hpd_invert(dev_priv);
3158         icp_hpd_irq_setup(dev_priv);
3159 }
3160
3161 static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
3162 {
3163         intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
3164                          intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
3165                          intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3166 }
3167
3168 static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder)
3169 {
3170         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3171
3172         intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL,
3173                          gen11_hotplug_mask(encoder->hpd_pin),
3174                          gen11_hotplug_enables(encoder));
3175 }
3176
3177 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3178 {
3179         intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
3180                          intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
3181                          intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
3182 }
3183
3184 static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder)
3185 {
3186         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3187
3188         intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL,
3189                          gen11_hotplug_mask(encoder->hpd_pin),
3190                          gen11_hotplug_enables(encoder));
3191 }
3192
3193 static void gen11_hpd_enable_detection(struct intel_encoder *encoder)
3194 {
3195         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3196
3197         gen11_tc_hpd_enable_detection(encoder);
3198         gen11_tbt_hpd_enable_detection(encoder);
3199
3200         if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
3201                 icp_hpd_enable_detection(encoder);
3202 }
3203
3204 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3205 {
3206         u32 hotplug_irqs, enabled_irqs;
3207
3208         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3209         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3210
3211         intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs,
3212                          ~enabled_irqs & hotplug_irqs);
3213         intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
3214
3215         gen11_tc_hpd_detection_setup(dev_priv);
3216         gen11_tbt_hpd_detection_setup(dev_priv);
3217
3218         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3219                 icp_hpd_irq_setup(dev_priv);
3220 }
3221
3222 static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
3223 {
3224         switch (hpd_pin) {
3225         case HPD_PORT_A:
3226         case HPD_PORT_B:
3227                 return SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin);
3228         default:
3229                 return 0;
3230         }
3231 }
3232
3233 static u32 mtp_ddi_hotplug_enables(struct intel_encoder *encoder)
3234 {
3235         return mtp_ddi_hotplug_mask(encoder->hpd_pin);
3236 }
3237
3238 static u32 mtp_tc_hotplug_mask(enum hpd_pin hpd_pin)
3239 {
3240         switch (hpd_pin) {
3241         case HPD_PORT_TC1:
3242         case HPD_PORT_TC2:
3243         case HPD_PORT_TC3:
3244         case HPD_PORT_TC4:
3245                 return ICP_TC_HPD_ENABLE(hpd_pin);
3246         default:
3247                 return 0;
3248         }
3249 }
3250
3251 static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder)
3252 {
3253         return mtp_tc_hotplug_mask(encoder->hpd_pin);
3254 }
3255
3256 static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915)
3257 {
3258         intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
3259                      intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask),
3260                      intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables));
3261 }
3262
3263 static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
3264 {
3265         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3266
3267         intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
3268                      mtp_ddi_hotplug_mask(encoder->hpd_pin),
3269                      mtp_ddi_hotplug_enables(encoder));
3270 }
3271
3272 static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915)
3273 {
3274         intel_de_rmw(i915, SHOTPLUG_CTL_TC,
3275                      intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask),
3276                      intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables));
3277 }
3278
3279 static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
3280 {
3281         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3282
	intel_de_rmw(i915, SHOTPLUG_CTL_TC,
3284                      mtp_tc_hotplug_mask(encoder->hpd_pin),
3285                      mtp_tc_hotplug_enables(encoder));
3286 }
3287
3288 static void mtp_hpd_invert(struct drm_i915_private *i915)
3289 {
3290         u32 val = (INVERT_DDIA_HPD |
3291                    INVERT_DDIB_HPD |
3292                    INVERT_DDIC_HPD |
3293                    INVERT_TC1_HPD |
3294                    INVERT_TC2_HPD |
3295                    INVERT_TC3_HPD |
3296                    INVERT_TC4_HPD |
3297                    INVERT_DDID_HPD_MTP |
3298                    INVERT_DDIE_HPD);
3299         intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val);
3300 }
3301
3302 static void mtp_hpd_enable_detection(struct intel_encoder *encoder)
3303 {
3304         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3305
3306         mtp_hpd_invert(i915);
3307         mtp_ddi_hpd_enable_detection(encoder);
3308         mtp_tc_hpd_enable_detection(encoder);
3309 }
3310
3311 static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
3312 {
3313         u32 hotplug_irqs, enabled_irqs;
3314
3315         enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
3316         hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
3317
3318         intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3319
3320         mtp_hpd_invert(i915);
3321         ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
3322
3323         mtp_ddi_hpd_detection_setup(i915);
3324         mtp_tc_hpd_detection_setup(i915);
3325 }
3326
3327 static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
3328 {
3329         return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4;
3330 }
3331
3332 static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
3333                                              enum hpd_pin hpd_pin, bool enable)
3334 {
3335         u32 mask = XELPDP_TBT_HOTPLUG_ENABLE |
3336                 XELPDP_DP_ALT_HOTPLUG_ENABLE;
3337
3338         if (!is_xelpdp_pica_hpd_pin(hpd_pin))
3339                 return;
3340
3341         intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
3342                      mask, enable ? mask : 0);
3343 }
3344
3345 static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder)
3346 {
3347         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3348
3349         _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true);
3350 }
3351
3352 static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
3353 {
3354         struct intel_encoder *encoder;
3355         u32 available_pins = 0;
3356         enum hpd_pin pin;
3357
3358         BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS);
3359
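	/*
	 * Collect the set of HPD pins that have an encoder attached, then
	 * enable detection for exactly that set and disable it for the rest.
	 */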
3360         for_each_intel_encoder(&i915->drm, encoder)
3361                 available_pins |= BIT(encoder->hpd_pin);
3362
3363         for_each_hpd_pin(pin)
3364                 _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin));
3365 }
3366
3367 static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
3368 {
3369         xelpdp_pica_hpd_enable_detection(encoder);
3370         mtp_hpd_enable_detection(encoder);
3371 }
3372
3373 static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
3374 {
3375         u32 hotplug_irqs, enabled_irqs;
3376
3377         enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd);
3378         hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd);
3379
3380         intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs,
3381                      ~enabled_irqs & hotplug_irqs);
3382         intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR);
3383
3384         xelpdp_pica_hpd_detection_setup(i915);
3385
3386         if (INTEL_PCH_TYPE(i915) >= PCH_MTP)
3387                 mtp_hpd_irq_setup(i915);
3388 }
3389
3390 static u32 spt_hotplug_mask(enum hpd_pin hpd_pin)
3391 {
3392         switch (hpd_pin) {
3393         case HPD_PORT_A:
3394                 return PORTA_HOTPLUG_ENABLE;
3395         case HPD_PORT_B:
3396                 return PORTB_HOTPLUG_ENABLE;
3397         case HPD_PORT_C:
3398                 return PORTC_HOTPLUG_ENABLE;
3399         case HPD_PORT_D:
3400                 return PORTD_HOTPLUG_ENABLE;
3401         default:
3402                 return 0;
3403         }
3404 }
3405
3406 static u32 spt_hotplug_enables(struct intel_encoder *encoder)
3407 {
3408         return spt_hotplug_mask(encoder->hpd_pin);
3409 }
3410
3411 static u32 spt_hotplug2_mask(enum hpd_pin hpd_pin)
3412 {
3413         switch (hpd_pin) {
3414         case HPD_PORT_E:
3415                 return PORTE_HOTPLUG_ENABLE;
3416         default:
3417                 return 0;
3418         }
3419 }
3420
3421 static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
3422 {
3423         return spt_hotplug2_mask(encoder->hpd_pin);
3424 }
3425
3426 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3427 {
3428         /* Display WA #1179 WaHardHangonHotPlug: cnp */
3429         if (HAS_PCH_CNP(dev_priv)) {
3430                 intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK,
3431                                  CHASSIS_CLK_REQ_DURATION(0xf));
3432         }
3433
3434         /* Enable digital hotplug on the PCH */
3435         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3436                          intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask),
3437                          intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
3438
3439         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2,
3440                          intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask),
3441                          intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
3442 }
3443
3444 static void spt_hpd_enable_detection(struct intel_encoder *encoder)
3445 {
3446         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3447
3448         /* Display WA #1179 WaHardHangonHotPlug: cnp */
3449         if (HAS_PCH_CNP(i915)) {
3450                 intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1,
3451                                  CHASSIS_CLK_REQ_DURATION_MASK,
3452                                  CHASSIS_CLK_REQ_DURATION(0xf));
3453         }
3454
3455         intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
3456                          spt_hotplug_mask(encoder->hpd_pin),
3457                          spt_hotplug_enables(encoder));
3458
3459         intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2,
3460                          spt_hotplug2_mask(encoder->hpd_pin),
3461                          spt_hotplug2_enables(encoder));
3462 }
3463
3464 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3465 {
3466         u32 hotplug_irqs, enabled_irqs;
3467
3468         if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3469                 intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3470
3471         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3472         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
3473
3474         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3475
3476         spt_hpd_detection_setup(dev_priv);
3477 }
3478
3479 static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin)
3480 {
3481         switch (hpd_pin) {
3482         case HPD_PORT_A:
3483                 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3484                         DIGITAL_PORTA_PULSE_DURATION_MASK;
3485         default:
3486                 return 0;
3487         }
3488 }
3489
3490 static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
3491 {
3492         switch (encoder->hpd_pin) {
3493         case HPD_PORT_A:
3494                 return DIGITAL_PORTA_HOTPLUG_ENABLE |
3495                         DIGITAL_PORTA_PULSE_DURATION_2ms;
3496         default:
3497                 return 0;
3498         }
3499 }
3500
3501 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3502 {
3503         /*
3504          * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
3506          * The pulse duration bits are reserved on HSW+.
3507          */
3508         intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3509                          intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask),
3510                          intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
3511 }
3512
3513 static void ilk_hpd_enable_detection(struct intel_encoder *encoder)
3514 {
3515         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3516
3517         intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
3518                          ilk_hotplug_mask(encoder->hpd_pin),
3519                          ilk_hotplug_enables(encoder));
3520
3521         ibx_hpd_enable_detection(encoder);
3522 }
3523
3524 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3525 {
3526         u32 hotplug_irqs, enabled_irqs;
3527
3528         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3529         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3530
3531         if (DISPLAY_VER(dev_priv) >= 8)
3532                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3533         else
3534                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3535
3536         ilk_hpd_detection_setup(dev_priv);
3537
3538         ibx_hpd_irq_setup(dev_priv);
3539 }
3540
3541 static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin)
3542 {
3543         switch (hpd_pin) {
3544         case HPD_PORT_A:
3545                 return PORTA_HOTPLUG_ENABLE | BXT_DDIA_HPD_INVERT;
3546         case HPD_PORT_B:
3547                 return PORTB_HOTPLUG_ENABLE | BXT_DDIB_HPD_INVERT;
3548         case HPD_PORT_C:
3549                 return PORTC_HOTPLUG_ENABLE | BXT_DDIC_HPD_INVERT;
3550         default:
3551                 return 0;
3552         }
3553 }
3554
3555 static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
3556 {
3557         u32 hotplug;
3558
3559         switch (encoder->hpd_pin) {
3560         case HPD_PORT_A:
3561                 hotplug = PORTA_HOTPLUG_ENABLE;
3562                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3563                         hotplug |= BXT_DDIA_HPD_INVERT;
3564                 return hotplug;
3565         case HPD_PORT_B:
3566                 hotplug = PORTB_HOTPLUG_ENABLE;
3567                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3568                         hotplug |= BXT_DDIB_HPD_INVERT;
3569                 return hotplug;
3570         case HPD_PORT_C:
3571                 hotplug = PORTC_HOTPLUG_ENABLE;
3572                 if (intel_bios_encoder_hpd_invert(encoder->devdata))
3573                         hotplug |= BXT_DDIC_HPD_INVERT;
3574                 return hotplug;
3575         default:
3576                 return 0;
3577         }
3578 }
3579
3580 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3581 {
3582         intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
3583                          intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask),
3584                          intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
3585 }
3586
3587 static void bxt_hpd_enable_detection(struct intel_encoder *encoder)
3588 {
3589         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3590
3591         intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
3592                          bxt_hotplug_mask(encoder->hpd_pin),
3593                          bxt_hotplug_enables(encoder));
3594 }
3595
3596 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3597 {
3598         u32 hotplug_irqs, enabled_irqs;
3599
3600         enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3601         hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
3602
3603         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3604
3605         bxt_hpd_detection_setup(dev_priv);
3606 }
3607
3608 /*
3609  * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled;
3611  * instead we unconditionally enable all PCH interrupt sources here, but then
3612  * only unmask them as needed with SDEIMR.
3613  *
3614  * Note that we currently do this after installing the interrupt handler,
3615  * but before we enable the master interrupt. That should be sufficient
3616  * to avoid races with the irq handler, assuming we have MSI. Shared legacy
3617  * interrupts could still race.
3618  */
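/*
 * In register terms, the GEN3_IRQ_INIT() below boils down to roughly
 * the following (an illustrative sketch, not the literal macro
 * expansion):
 *
 *	SDEIER = 0xffffffff;	-- every PCH source enabled, permanently
 *	SDEIMR = ~mask;		-- only the 'mask' sources are unmasked
 */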
3619 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3620 {
3621         struct intel_uncore *uncore = &dev_priv->uncore;
3622         u32 mask;
3623
3624         if (HAS_PCH_NOP(dev_priv))
3625                 return;
3626
3627         if (HAS_PCH_IBX(dev_priv))
3628                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3629         else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3630                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3631         else
3632                 mask = SDE_GMBUS_CPT;
3633
3634         GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3635 }
3636
3637 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3638 {
3639         struct intel_uncore *uncore = &dev_priv->uncore;
3640         u32 display_mask, extra_mask;
3641
3642         if (GRAPHICS_VER(dev_priv) >= 7) {
3643                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3644                                 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3645                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3646                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3647                               DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
3648                               DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
3649                               DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
3650                               DE_DP_A_HOTPLUG_IVB);
3651         } else {
3652                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3653                                 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3654                                 DE_PIPEA_CRC_DONE | DE_POISON);
3655                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
3656                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3657                               DE_PLANE_FLIP_DONE(PLANE_A) |
3658                               DE_PLANE_FLIP_DONE(PLANE_B) |
3659                               DE_DP_A_HOTPLUG);
3660         }
3661
3662         if (IS_HASWELL(dev_priv)) {
3663                 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3664                 display_mask |= DE_EDP_PSR_INT_HSW;
3665         }
3666
3667         if (IS_IRONLAKE_M(dev_priv))
3668                 extra_mask |= DE_PCU_EVENT;
3669
3670         dev_priv->irq_mask = ~display_mask;
3671
3672         ibx_irq_postinstall(dev_priv);
3673
3674         gen5_gt_irq_postinstall(to_gt(dev_priv));
3675
3676         GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3677                       display_mask | extra_mask);
3678 }
3679
3680 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3681 {
3682         lockdep_assert_held(&dev_priv->irq_lock);
3683
3684         if (dev_priv->display_irqs_enabled)
3685                 return;
3686
3687         dev_priv->display_irqs_enabled = true;
3688
3689         if (intel_irqs_enabled(dev_priv)) {
3690                 vlv_display_irq_reset(dev_priv);
3691                 vlv_display_irq_postinstall(dev_priv);
3692         }
3693 }
3694
3695 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3696 {
3697         lockdep_assert_held(&dev_priv->irq_lock);
3698
3699         if (!dev_priv->display_irqs_enabled)
3700                 return;
3701
3702         dev_priv->display_irqs_enabled = false;
3703
3704         if (intel_irqs_enabled(dev_priv))
3705                 vlv_display_irq_reset(dev_priv);
3706 }
3707
3709 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3710 {
3711         gen5_gt_irq_postinstall(to_gt(dev_priv));
3712
3713         spin_lock_irq(&dev_priv->irq_lock);
3714         if (dev_priv->display_irqs_enabled)
3715                 vlv_display_irq_postinstall(dev_priv);
3716         spin_unlock_irq(&dev_priv->irq_lock);
3717
3718         intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3719         intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3720 }
3721
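/*
 * Program the display engine (DE) interrupt registers: per-pipe,
 * port/AUX, misc and, on DISPLAY_VER 11-13, the HPD block. Pipes whose
 * power wells are currently down are skipped here; they are reprogrammed
 * by gen8_irq_power_well_post_enable() once the well comes up.
 */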
3722 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3723 {
3724         struct intel_uncore *uncore = &dev_priv->uncore;
3726         u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
3727                 GEN8_PIPE_CDCLK_CRC_DONE;
3728         u32 de_pipe_enables;
3729         u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
3730         u32 de_port_enables;
3731         u32 de_misc_masked = GEN8_DE_EDP_PSR;
3732         u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3733                 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3734         enum pipe pipe;
3735
3736         if (!HAS_DISPLAY(dev_priv))
3737                 return;
3738
3739         if (DISPLAY_VER(dev_priv) <= 10)
3740                 de_misc_masked |= GEN8_DE_MISC_GSE;
3741
3742         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3743                 de_port_masked |= BXT_DE_PORT_GMBUS;
3744
3745         if (DISPLAY_VER(dev_priv) >= 11) {
3746                 enum port port;
3747
3748                 if (intel_bios_is_dsi_present(dev_priv, &port))
3749                         de_port_masked |= DSI0_TE | DSI1_TE;
3750         }
3751
3752         de_pipe_enables = de_pipe_masked |
3753                 GEN8_PIPE_VBLANK |
3754                 gen8_de_pipe_underrun_mask(dev_priv) |
3755                 gen8_de_pipe_flip_done_mask(dev_priv);
3756
3757         de_port_enables = de_port_masked;
3758         if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
3759                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3760         else if (IS_BROADWELL(dev_priv))
3761                 de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
3762
3763         if (DISPLAY_VER(dev_priv) >= 12) {
3764                 enum transcoder trans;
3765
3766                 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3767                         enum intel_display_power_domain domain;
3768
3769                         domain = POWER_DOMAIN_TRANSCODER(trans);
3770                         if (!intel_display_power_is_enabled(dev_priv, domain))
3771                                 continue;
3772
3773                         gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3774                 }
3775         } else {
3776                 gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3777         }
3778
3779         for_each_pipe(dev_priv, pipe) {
3780                 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3781
3782                 if (intel_display_power_is_enabled(dev_priv,
3783                                 POWER_DOMAIN_PIPE(pipe)))
3784                         GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3785                                           dev_priv->de_irq_mask[pipe],
3786                                           de_pipe_enables);
3787         }
3788
3789         GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3790         GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3791
3792         if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
3793                 u32 de_hpd_masked = 0;
3794                 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3795                                      GEN11_DE_TBT_HOTPLUG_MASK;
3796
3797                 GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3798                               de_hpd_enables);
3799         }
3800 }
3801
3802 static void mtp_irq_postinstall(struct drm_i915_private *i915)
3803 {
3804         struct intel_uncore *uncore = &i915->uncore;
3805         u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
3806         u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
3807         u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
3808                              XELPDP_TBT_HOTPLUG_MASK;
3809
3810         GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
3811                       de_hpd_enables);
3812
3813         GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
3814 }
3815
3816 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3817 {
3818         struct intel_uncore *uncore = &dev_priv->uncore;
3819         u32 mask = SDE_GMBUS_ICP;
3820
3821         GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
3822 }
3823
3824 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3825 {
3826         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3827                 icp_irq_postinstall(dev_priv);
3828         else if (HAS_PCH_SPLIT(dev_priv))
3829                 ibx_irq_postinstall(dev_priv);
3830
3831         gen8_gt_irq_postinstall(to_gt(dev_priv));
3832         gen8_de_irq_postinstall(dev_priv);
3833
3834         gen8_master_intr_enable(dev_priv->uncore.regs);
3835 }
3836
3837 static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
3838 {
3839         if (!HAS_DISPLAY(dev_priv))
3840                 return;
3841
3842         gen8_de_irq_postinstall(dev_priv);
3843
3844         intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3845                            GEN11_DISPLAY_IRQ_ENABLE);
3846 }
3847
3848 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3849 {
3850         struct intel_gt *gt = to_gt(dev_priv);
3851         struct intel_uncore *uncore = gt->uncore;
3852         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3853
3854         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3855                 icp_irq_postinstall(dev_priv);
3856
3857         gen11_gt_irq_postinstall(gt);
3858         gen11_de_irq_postinstall(dev_priv);
3859
3860         GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3861
3862         gen11_master_intr_enable(uncore->regs);
3863         intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
3864 }
3865
3866 static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
3867 {
3868         struct intel_gt *gt = to_gt(dev_priv);
3869         struct intel_uncore *uncore = gt->uncore;
3870         u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3871
3872         gen11_gt_irq_postinstall(gt);
3873
3874         GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3875
3876         if (HAS_DISPLAY(dev_priv)) {
3877                 if (DISPLAY_VER(dev_priv) >= 14)
3878                         mtp_irq_postinstall(dev_priv);
3879                 else
3880                         icp_irq_postinstall(dev_priv);
3881
3882                 gen8_de_irq_postinstall(dev_priv);
3883                 intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
3884                                    GEN11_DISPLAY_IRQ_ENABLE);
3885         }
3886
3887         dg1_master_intr_enable(uncore->regs);
3888         intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
3889 }
3890
3891 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3892 {
3893         gen8_gt_irq_postinstall(to_gt(dev_priv));
3894
3895         spin_lock_irq(&dev_priv->irq_lock);
3896         if (dev_priv->display_irqs_enabled)
3897                 vlv_display_irq_postinstall(dev_priv);
3898         spin_unlock_irq(&dev_priv->irq_lock);
3899
3900         intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3901         intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3902 }
3903
3904 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3905 {
3906         struct intel_uncore *uncore = &dev_priv->uncore;
3907
3908         i9xx_pipestat_irq_reset(dev_priv);
3909
3910         gen2_irq_reset(uncore);
3911         dev_priv->irq_mask = ~0u;
3912 }
3913
3914 static u32 i9xx_error_mask(struct drm_i915_private *i915)
3915 {
3916         /*
3917          * On gen2/3 FBC generates (seemingly spurious)
3918          * display INVALID_GTT/INVALID_GTT_PTE table errors.
3919          *
3920          * Also gen3 bspec has this to say:
3921          * "DISPA_INVALID_GTT_PTE
	 *  [DevNapa] : Reserved. This bit does not reflect the page
	 *              table error for the display plane A."
3924          *
3925          * Unfortunately we can't mask off individual PGTBL_ER bits,
3926          * so we just have to mask off all page table errors via EMR.
3927          */
3928         if (HAS_FBC(i915))
3929                 return ~I915_ERROR_MEMORY_REFRESH;
3930         else
3931                 return ~(I915_ERROR_PAGE_TABLE |
3932                          I915_ERROR_MEMORY_REFRESH);
3933 }
3934
3935 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3936 {
3937         struct intel_uncore *uncore = &dev_priv->uncore;
3938         u16 enable_mask;
3939
3940         intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
3941
3942         /* Unmask the interrupts that we always want on. */
3943         dev_priv->irq_mask =
3944                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3945                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3946                   I915_MASTER_ERROR_INTERRUPT);
3947
3948         enable_mask =
3949                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3950                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3951                 I915_MASTER_ERROR_INTERRUPT |
3952                 I915_USER_INTERRUPT;
3953
3954         gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
3955
	/* Interrupt setup is already guaranteed to be single-threaded; this is
3957          * just to make the assert_spin_locked check happy. */
3958         spin_lock_irq(&dev_priv->irq_lock);
3959         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3960         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3961         spin_unlock_irq(&dev_priv->irq_lock);
3962 }
3963
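/*
 * Latch and clear EIR. Whatever survives the clearing write ("stuck"
 * bits) cannot be cleared without resolving the underlying error, so
 * those bits get masked off in EMR to stop them from re-asserting the
 * master error interrupt.
 */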
3964 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3965                                u16 *eir, u16 *eir_stuck)
3966 {
3967         struct intel_uncore *uncore = &i915->uncore;
3968         u16 emr;
3969
3970         *eir = intel_uncore_read16(uncore, EIR);
3971         intel_uncore_write16(uncore, EIR, *eir);
3972
3973         *eir_stuck = intel_uncore_read16(uncore, EIR);
3974         if (*eir_stuck == 0)
3975                 return;
3976
3977         /*
3978          * Toggle all EMR bits to make sure we get an edge
3979          * in the ISR master error bit if we don't clear
3980          * all the EIR bits. Otherwise the edge triggered
3981          * IIR on i965/g4x wouldn't notice that an interrupt
3982          * is still pending. Also some EIR bits can't be
3983          * cleared except by handling the underlying error
3984          * (or by a GPU reset) so we mask any bit that
3985          * remains set.
3986          */
3987         emr = intel_uncore_read16(uncore, EMR);
3988         intel_uncore_write16(uncore, EMR, 0xffff);
3989         intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3990 }
3991
3992 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3993                                    u16 eir, u16 eir_stuck)
3994 {
3995         drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
3996
3997         if (eir_stuck)
3998                 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
3999                         eir_stuck);
4000
4001         drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
4002                 intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
4003 }
4004
4005 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
4006                                u32 *eir, u32 *eir_stuck)
4007 {
4008         u32 emr;
4009
4010         *eir = intel_uncore_read(&dev_priv->uncore, EIR);
4011         intel_uncore_write(&dev_priv->uncore, EIR, *eir);
4012
4013         *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
4014         if (*eir_stuck == 0)
4015                 return;
4016
4017         /*
4018          * Toggle all EMR bits to make sure we get an edge
4019          * in the ISR master error bit if we don't clear
4020          * all the EIR bits. Otherwise the edge triggered
4021          * IIR on i965/g4x wouldn't notice that an interrupt
4022          * is still pending. Also some EIR bits can't be
4023          * cleared except by handling the underlying error
4024          * (or by a GPU reset) so we mask any bit that
4025          * remains set.
4026          */
4027         emr = intel_uncore_read(&dev_priv->uncore, EMR);
4028         intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
4029         intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
4030 }
4031
4032 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
4033                                    u32 eir, u32 eir_stuck)
4034 {
4035         drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);
4036
4037         if (eir_stuck)
4038                 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
4039                         eir_stuck);
4040
4041         drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
4042                 intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
4043 }
4044
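/*
 * The legacy (pre-GEN5) interrupt handlers below all follow the same
 * recipe: snapshot IIR, ack the sources that need register-specific
 * handling (pipestat, hotplug, error) first, then clear IIR, and only
 * then dispatch from the snapshot; clearing the underlying status
 * before IIR matters on the edge-triggered IIR variants, as described
 * in i8xx_error_irq_ack() above.
 */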
4045 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4046 {
4047         struct drm_i915_private *dev_priv = arg;
4048         irqreturn_t ret = IRQ_NONE;
4049
4050         if (!intel_irqs_enabled(dev_priv))
4051                 return IRQ_NONE;
4052
	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4054         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4055
4056         do {
4057                 u32 pipe_stats[I915_MAX_PIPES] = {};
4058                 u16 eir = 0, eir_stuck = 0;
4059                 u16 iir;
4060
4061                 iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
4062                 if (iir == 0)
4063                         break;
4064
4065                 ret = IRQ_HANDLED;
4066
4067                 /* Call regardless, as some status bits might not be
4068                  * signalled in iir */
4069                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4070
4071                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4072                         i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4073
4074                 intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
4075
4076                 if (iir & I915_USER_INTERRUPT)
4077                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4078
4079                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4080                         i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
4081
4082                 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4083         } while (0);
4084
4085         pmu_irq_stats(dev_priv, ret);
4086
4087         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4088
4089         return ret;
4090 }
4091
4092 static void i915_irq_reset(struct drm_i915_private *dev_priv)
4093 {
4094         struct intel_uncore *uncore = &dev_priv->uncore;
4095
4096         if (I915_HAS_HOTPLUG(dev_priv)) {
4097                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
4099         }
4100
4101         i9xx_pipestat_irq_reset(dev_priv);
4102
4103         GEN3_IRQ_RESET(uncore, GEN2_);
4104         dev_priv->irq_mask = ~0u;
4105 }
4106
4107 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
4108 {
4109         struct intel_uncore *uncore = &dev_priv->uncore;
4110         u32 enable_mask;
4111
4112         intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
4113
4114         /* Unmask the interrupts that we always want on. */
4115         dev_priv->irq_mask =
4116                 ~(I915_ASLE_INTERRUPT |
4117                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4118                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4119                   I915_MASTER_ERROR_INTERRUPT);
4120
4121         enable_mask =
4122                 I915_ASLE_INTERRUPT |
4123                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4124                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4125                 I915_MASTER_ERROR_INTERRUPT |
4126                 I915_USER_INTERRUPT;
4127
4128         if (I915_HAS_HOTPLUG(dev_priv)) {
4129                 /* Enable in IER... */
4130                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4131                 /* and unmask in IMR */
4132                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4133         }
4134
4135         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4136
	/* Interrupt setup is already guaranteed to be single-threaded; this is
4138          * just to make the assert_spin_locked check happy. */
4139         spin_lock_irq(&dev_priv->irq_lock);
4140         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4141         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4142         spin_unlock_irq(&dev_priv->irq_lock);
4143
4144         i915_enable_asle_pipestat(dev_priv);
4145 }
4146
4147 static irqreturn_t i915_irq_handler(int irq, void *arg)
4148 {
4149         struct drm_i915_private *dev_priv = arg;
4150         irqreturn_t ret = IRQ_NONE;
4151
4152         if (!intel_irqs_enabled(dev_priv))
4153                 return IRQ_NONE;
4154
	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4156         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4157
4158         do {
4159                 u32 pipe_stats[I915_MAX_PIPES] = {};
4160                 u32 eir = 0, eir_stuck = 0;
4161                 u32 hotplug_status = 0;
4162                 u32 iir;
4163
4164                 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4165                 if (iir == 0)
4166                         break;
4167
4168                 ret = IRQ_HANDLED;
4169
4170                 if (I915_HAS_HOTPLUG(dev_priv) &&
4171                     iir & I915_DISPLAY_PORT_INTERRUPT)
4172                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4173
4174                 /* Call regardless, as some status bits might not be
4175                  * signalled in iir */
4176                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4177
4178                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4179                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4180
4181                 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4182
4183                 if (iir & I915_USER_INTERRUPT)
4184                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
4185
4186                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4187                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4188
4189                 if (hotplug_status)
4190                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4191
4192                 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4193         } while (0);
4194
4195         pmu_irq_stats(dev_priv, ret);
4196
4197         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4198
4199         return ret;
4200 }
4201
4202 static void i965_irq_reset(struct drm_i915_private *dev_priv)
4203 {
4204         struct intel_uncore *uncore = &dev_priv->uncore;
4205
4206         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4207         intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);
4208
4209         i9xx_pipestat_irq_reset(dev_priv);
4210
4211         GEN3_IRQ_RESET(uncore, GEN2_);
4212         dev_priv->irq_mask = ~0u;
4213 }
4214
4215 static u32 i965_error_mask(struct drm_i915_private *i915)
4216 {
4217         /*
4218          * Enable some error detection, note the instruction error mask
4219          * bit is reserved, so we leave it masked.
4220          *
4221          * i965 FBC no longer generates spurious GTT errors,
4222          * so we can always enable the page table errors.
4223          */
4224         if (IS_G4X(i915))
4225                 return ~(GM45_ERROR_PAGE_TABLE |
4226                          GM45_ERROR_MEM_PRIV |
4227                          GM45_ERROR_CP_PRIV |
4228                          I915_ERROR_MEMORY_REFRESH);
4229         else
4230                 return ~(I915_ERROR_PAGE_TABLE |
4231                          I915_ERROR_MEMORY_REFRESH);
4232 }
4233
4234 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
4235 {
4236         struct intel_uncore *uncore = &dev_priv->uncore;
4237         u32 enable_mask;
4238
4239         intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
4240
4241         /* Unmask the interrupts that we always want on. */
4242         dev_priv->irq_mask =
4243                 ~(I915_ASLE_INTERRUPT |
4244                   I915_DISPLAY_PORT_INTERRUPT |
4245                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4246                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4247                   I915_MASTER_ERROR_INTERRUPT);
4248
4249         enable_mask =
4250                 I915_ASLE_INTERRUPT |
4251                 I915_DISPLAY_PORT_INTERRUPT |
4252                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4253                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4254                 I915_MASTER_ERROR_INTERRUPT |
4255                 I915_USER_INTERRUPT;
4256
4257         if (IS_G4X(dev_priv))
4258                 enable_mask |= I915_BSD_USER_INTERRUPT;
4259
4260         GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
4261
	/* Interrupt setup is already guaranteed to be single-threaded; this is
4263          * just to make the assert_spin_locked check happy. */
4264         spin_lock_irq(&dev_priv->irq_lock);
4265         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4266         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4267         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4268         spin_unlock_irq(&dev_priv->irq_lock);
4269
4270         i915_enable_asle_pipestat(dev_priv);
4271 }
4272
4273 static void i915_hpd_enable_detection(struct intel_encoder *encoder)
4274 {
4275         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4276         u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
4277
4278         /* HPD sense and interrupt enable are one and the same */
4279         i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
4280 }
4281
4282 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4283 {
4284         u32 hotplug_en;
4285
4286         lockdep_assert_held(&dev_priv->irq_lock);
4287
4288         /* Note HDMI and DP share hotplug bits */
4289         /* enable bits are the same for all generations */
4290         hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
4295         if (IS_G4X(dev_priv))
4296                 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4297         hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4298
4299         /* Ignore TV since it's buggy */
4300         i915_hotplug_interrupt_update_locked(dev_priv,
4301                                              HOTPLUG_INT_EN_MASK |
4302                                              CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4303                                              CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4304                                              hotplug_en);
4305 }
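
/*
 * For reference: i915_hotplug_interrupt_update_locked(i915, mask, bits)
 * rewrites only the register bits covered by @mask, so the call above
 * programs the per-pin enables plus the CRT detection parameters while
 * leaving the remaining PORT_HOTPLUG_EN fields (such as the TV ones)
 * untouched.
 */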
4306
4307 static irqreturn_t i965_irq_handler(int irq, void *arg)
4308 {
4309         struct drm_i915_private *dev_priv = arg;
4310         irqreturn_t ret = IRQ_NONE;
4311
4312         if (!intel_irqs_enabled(dev_priv))
4313                 return IRQ_NONE;
4314
4315         /* IRQs are synced during runtime_suspend; we don't require a wakeref */
4316         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4317
4318         do {
4319                 u32 pipe_stats[I915_MAX_PIPES] = {};
4320                 u32 eir = 0, eir_stuck = 0;
4321                 u32 hotplug_status = 0;
4322                 u32 iir;
4323
4324                 iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
4325                 if (iir == 0)
4326                         break;
4327
4328                 ret = IRQ_HANDLED;
4329
4330                 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4331                         hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4332
4333                 /* Call regardless, as some status bits might not be
4334                  * signalled in iir */
4335                 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
4336
4337                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4338                         i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
4339
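                /*
                 * Ack the secondary status registers (PIPESTAT, hotplug,
                 * EIR) before clearing IIR: the IIR summary bits stay
                 * asserted for as long as the underlying status bits are
                 * set, so clearing IIR first would not stick.
                 */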
4340                 intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
4341
4342                 if (iir & I915_USER_INTERRUPT)
4343                         intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
4344                                             iir);
4345
4346                 if (iir & I915_BSD_USER_INTERRUPT)
4347                         intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
4348                                             iir >> 25);
4349
4350                 if (iir & I915_MASTER_ERROR_INTERRUPT)
4351                         i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
4352
4353                 if (hotplug_status)
4354                         i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4355
4356                 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
4357         } while (0);
4358
4359         pmu_irq_stats(dev_priv, ret);
4360
4361         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
4362
4363         return ret;
4364 }
4365
4366 struct intel_hotplug_funcs {
4367         /* Enable HPD sense and interrupts for all present encoders */
4368         void (*hpd_irq_setup)(struct drm_i915_private *i915);
4369         /* Enable HPD sense for a single encoder */
4370         void (*hpd_enable_detection)(struct intel_encoder *encoder);
4371 };
4372
4373 #define HPD_FUNCS(platform)                                      \
4374 static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
4375         .hpd_irq_setup = platform##_hpd_irq_setup,               \
4376         .hpd_enable_detection = platform##_hpd_enable_detection, \
4377 }
4378
4379 HPD_FUNCS(i915);
4380 HPD_FUNCS(xelpdp);
4381 HPD_FUNCS(dg1);
4382 HPD_FUNCS(gen11);
4383 HPD_FUNCS(bxt);
4384 HPD_FUNCS(icp);
4385 HPD_FUNCS(spt);
4386 HPD_FUNCS(ilk);
4387 #undef HPD_FUNCS
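
/*
 * For example, HPD_FUNCS(i915) above expands to:
 *
 *   static const struct intel_hotplug_funcs i915_hpd_funcs = {
 *           .hpd_irq_setup = i915_hpd_irq_setup,
 *           .hpd_enable_detection = i915_hpd_enable_detection,
 *   };
 */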
4388
4389 void intel_hpd_enable_detection(struct intel_encoder *encoder)
4390 {
4391         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4392
4393         if (i915->display.funcs.hotplug)
4394                 i915->display.funcs.hotplug->hpd_enable_detection(encoder);
4395 }
4396
4397 void intel_hpd_irq_setup(struct drm_i915_private *i915)
4398 {
4399         if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
4400                 i915->display.funcs.hotplug->hpd_irq_setup(i915);
4401 }
4402
4403 /**
4404  * intel_irq_init - initializes irq support
4405  * @dev_priv: i915 device instance
4406  *
4407  * This function initializes all the irq support including work items, timers
4408  * and all the vtables. It does not set up the interrupt itself, though.
4409  */
4410 void intel_irq_init(struct drm_i915_private *dev_priv)
4411 {
4412         int i;
4413
4414         INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
4415         for (i = 0; i < MAX_L3_SLICES; ++i)
4416                 dev_priv->l3_parity.remap_info[i] = NULL;
4417
4418         /* pre-gen11 the GuC irq bits are in the upper 16 bits of the PM reg */
4419         if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
4420                 to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
4421
4422         if (!HAS_DISPLAY(dev_priv))
4423                 return;
4424
4425         intel_hpd_init_pins(dev_priv);
4426
4427         intel_hpd_init_early(dev_priv);
4428
4429         dev_priv->drm.vblank_disable_immediate = true;
4430
4431         /* Most platforms treat the display irq block as an always-on
4432          * power domain. vlv/chv can disable it at runtime and need
4433          * special care to avoid writing any of the display block registers
4434          * outside of the power domain. In that case we defer setting up
4435          * the display irqs to the runtime pm code.
4436          */
4437         dev_priv->display_irqs_enabled = true;
4438         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4439                 dev_priv->display_irqs_enabled = false;
4440
4441         if (HAS_GMCH(dev_priv)) {
4442                 if (I915_HAS_HOTPLUG(dev_priv))
4443                         dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
4444         } else {
4445                 if (HAS_PCH_DG2(dev_priv))
4446                         dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4447                 else if (HAS_PCH_DG1(dev_priv))
4448                         dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
4449                 else if (DISPLAY_VER(dev_priv) >= 14)
4450                         dev_priv->display.funcs.hotplug = &xelpdp_hpd_funcs;
4451                 else if (DISPLAY_VER(dev_priv) >= 11)
4452                         dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
4453                 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4454                         dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
4455                 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
4456                         dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
4457                 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
4458                         dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
4459                 else
4460                         dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
4461         }
4462 }
4463
4464 /**
4465  * intel_irq_fini - deinitializes IRQ support
4466  * @i915: i915 device instance
4467  *
4468  * This function deinitializes all the IRQ support.
4469  */
4470 void intel_irq_fini(struct drm_i915_private *i915)
4471 {
4472         int i;
4473
4474         for (i = 0; i < MAX_L3_SLICES; ++i)
4475                 kfree(i915->l3_parity.remap_info[i]);
4476 }
4477
4478 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
4479 {
4480         if (HAS_GMCH(dev_priv)) {
4481                 if (IS_CHERRYVIEW(dev_priv))
4482                         return cherryview_irq_handler;
4483                 else if (IS_VALLEYVIEW(dev_priv))
4484                         return valleyview_irq_handler;
4485                 else if (GRAPHICS_VER(dev_priv) == 4)
4486                         return i965_irq_handler;
4487                 else if (GRAPHICS_VER(dev_priv) == 3)
4488                         return i915_irq_handler;
4489                 else
4490                         return i8xx_irq_handler;
4491         } else {
4492                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4493                         return dg1_irq_handler;
4494                 else if (GRAPHICS_VER(dev_priv) >= 11)
4495                         return gen11_irq_handler;
4496                 else if (GRAPHICS_VER(dev_priv) >= 8)
4497                         return gen8_irq_handler;
4498                 else
4499                         return ilk_irq_handler;
4500         }
4501 }
4502
4503 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4504 {
4505         if (HAS_GMCH(dev_priv)) {
4506                 if (IS_CHERRYVIEW(dev_priv))
4507                         cherryview_irq_reset(dev_priv);
4508                 else if (IS_VALLEYVIEW(dev_priv))
4509                         valleyview_irq_reset(dev_priv);
4510                 else if (GRAPHICS_VER(dev_priv) == 4)
4511                         i965_irq_reset(dev_priv);
4512                 else if (GRAPHICS_VER(dev_priv) == 3)
4513                         i915_irq_reset(dev_priv);
4514                 else
4515                         i8xx_irq_reset(dev_priv);
4516         } else {
4517                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4518                         dg1_irq_reset(dev_priv);
4519                 else if (GRAPHICS_VER(dev_priv) >= 11)
4520                         gen11_irq_reset(dev_priv);
4521                 else if (GRAPHICS_VER(dev_priv) >= 8)
4522                         gen8_irq_reset(dev_priv);
4523                 else
4524                         ilk_irq_reset(dev_priv);
4525         }
4526 }
4527
4528 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4529 {
4530         if (HAS_GMCH(dev_priv)) {
4531                 if (IS_CHERRYVIEW(dev_priv))
4532                         cherryview_irq_postinstall(dev_priv);
4533                 else if (IS_VALLEYVIEW(dev_priv))
4534                         valleyview_irq_postinstall(dev_priv);
4535                 else if (GRAPHICS_VER(dev_priv) == 4)
4536                         i965_irq_postinstall(dev_priv);
4537                 else if (GRAPHICS_VER(dev_priv) == 3)
4538                         i915_irq_postinstall(dev_priv);
4539                 else
4540                         i8xx_irq_postinstall(dev_priv);
4541         } else {
4542                 if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
4543                         dg1_irq_postinstall(dev_priv);
4544                 else if (GRAPHICS_VER(dev_priv) >= 11)
4545                         gen11_irq_postinstall(dev_priv);
4546                 else if (GRAPHICS_VER(dev_priv) >= 8)
4547                         gen8_irq_postinstall(dev_priv);
4548                 else
4549                         ilk_irq_postinstall(dev_priv);
4550         }
4551 }
4552
4553 /**
4554  * intel_irq_install - enables the hardware interrupt
4555  * @dev_priv: i915 device instance
4556  *
4557  * This function enables the hardware interrupt handling, but leaves hotplug
4558  * handling disabled. It is called after intel_irq_init().
4559  *
4560  * In the driver load and resume code we need working interrupts in a few places
4561  * but don't want to deal with the hassle of concurrent probe and hotplug
4562  * workers. Hence the split into a two-stage approach.
4563  */
4564 int intel_irq_install(struct drm_i915_private *dev_priv)
4565 {
4566         int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4567         int ret;
4568
4569         /*
4570          * We enable some interrupt sources in our postinstall hooks, so mark
4571          * interrupts as enabled _before_ actually enabling them to avoid
4572          * special cases in our ordering checks.
4573          */
4574         dev_priv->runtime_pm.irqs_enabled = true;
4575
4576         dev_priv->irq_enabled = true;
4577
4578         intel_irq_reset(dev_priv);
4579
4580         ret = request_irq(irq, intel_irq_handler(dev_priv),
4581                           IRQF_SHARED, DRIVER_NAME, dev_priv);
4582         if (ret < 0) {
4583                 dev_priv->irq_enabled = false;
4584                 return ret;
4585         }
4586
4587         intel_irq_postinstall(dev_priv);
4588
4589         return ret;
4590 }
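
/*
 * A sketch of the typical load/unload ordering (the real call sites
 * live in the driver probe/remove paths; error handling elided):
 *
 *   intel_irq_init(i915);            // vtables, work items; no HW access
 *   ...
 *   ret = intel_irq_install(i915);   // reset HW, request_irq(), postinstall
 *   if (ret)
 *           goto err;
 *   ...
 *   intel_irq_uninstall(i915);       // on remove, or when unwinding
 */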
4591
4592 /**
4593  * intel_irq_uninstall - finalizes all irq handling
4594  * @dev_priv: i915 device instance
4595  *
4596  * This stops interrupt and hotplug handling, and unregisters and frees all
4597  * resources acquired in the init functions.
4598  */
4599 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4600 {
4601         int irq = to_pci_dev(dev_priv->drm.dev)->irq;
4602
4603         /*
4604          * FIXME we can get called twice during driver probe
4605          * error handling as well as during driver remove due to
4606          * intel_display_driver_remove() calling us out of sequence.
4607          * Would be nice if it didn't do that...
4608          */
4609         if (!dev_priv->irq_enabled)
4610                 return;
4611
4612         dev_priv->irq_enabled = false;
4613
4614         intel_irq_reset(dev_priv);
4615
4616         free_irq(irq, dev_priv);
4617
4618         intel_hpd_cancel_work(dev_priv);
4619         dev_priv->runtime_pm.irqs_enabled = false;
4620 }
4621
4622 /**
4623  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4624  * @dev_priv: i915 device instance
4625  *
4626  * This function is used to disable interrupts at runtime, both in the runtime
4627  * pm and the system suspend/resume code.
4628  */
4629 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4630 {
4631         intel_irq_reset(dev_priv);
4632         dev_priv->runtime_pm.irqs_enabled = false;
4633         intel_synchronize_irq(dev_priv);
4634 }
4635
4636 /**
4637  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4638  * @dev_priv: i915 device instance
4639  *
4640  * This function is used to enable interrupts at runtime, both in the runtime
4641  * pm and the system suspend/resume code.
4642  */
4643 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4644 {
4645         dev_priv->runtime_pm.irqs_enabled = true;
4646         intel_irq_reset(dev_priv);
4647         intel_irq_postinstall(dev_priv);
4648 }
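
/*
 * These two are expected to be used as a pair, e.g. (sketch only, the
 * real call sites live in the suspend/resume paths):
 *
 *   intel_runtime_pm_disable_interrupts(i915);    // going to sleep
 *   ...
 *   intel_runtime_pm_enable_interrupts(i915);     // waking back up
 *
 * Note that enabling goes through a full reset + postinstall rather
 * than restoring any saved state.
 */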
4649
4650 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4651 {
4652         return dev_priv->runtime_pm.irqs_enabled;
4653 }
4654
4655 void intel_synchronize_irq(struct drm_i915_private *i915)
4656 {
4657         synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
4658 }
4659
4660 void intel_synchronize_hardirq(struct drm_i915_private *i915)
4661 {
4662         synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
4663 }