/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
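/*
 * Each IIR write below is followed by a posting read, and IIR is cleared
 * twice: if a second event was already queued while the first one was being
 * cleared, the second write flushes it out as well.
 */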
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
        u32 val = I915_READ(reg); \
        if (val) { \
                WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
                     (reg), val); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
        } \
} while (0)
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
        I915_WRITE(type##IMR, (imr_val)); \
        I915_WRITE(type##IER, (ier_val)); \
        POSTING_READ(type##IER); \
} while (0)
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(dev_priv->pm.irqs_disabled))
                return;

        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(dev_priv->pm.irqs_disabled))
                return;

        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(dev_priv->pm.irqs_disabled))
                return;
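        /*
         * IMR convention: a 1 bit masks the interrupt, a 0 bit lets it
         * through. The two lines below first set every bit selected by
         * interrupt_mask and then clear those of them that are also in
         * enabled_irq_mask, so selected-and-enabled bits end up unmasked
         * while selected-but-disabled bits end up masked; bits outside
         * interrupt_mask are left untouched. The same update pattern is
         * reused by the snb/bdw/ibx variants further down.
         */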
        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(dev_priv->pm.irqs_disabled))
                return;

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
                POSTING_READ(GEN6_PMIMR);
        }
}
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        enum pipe pipe;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->cpu_fifo_underrun_disabled)
                        return false;
        }

        return true;
}
/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(dev_priv->pm.irqs_disabled))
                return;

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
                POSTING_READ(GEN8_GT_IMR(2));
        }
}

void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        bdw_update_pm_irq(dev_priv, mask, mask);
}

void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        bdw_update_pm_irq(dev_priv, mask, 0);
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->pch_fifo_underrun_disabled)
                        return false;
        }

        return true;
}
void i9xx_check_fifo_underruns(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        for_each_intel_crtc(dev, crtc) {
                u32 reg = PIPESTAT(crtc->pipe);
                u32 pipestat;

                if (crtc->cpu_fifo_underrun_disabled)
                        continue;

                pipestat = I915_READ(reg) & 0xffff0000;
                if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
                        continue;

                I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
                POSTING_READ(reg);

                DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
                                             enum pipe pipe,
                                             bool enable, bool old)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0xffff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if (enable) {
                I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
                POSTING_READ(reg);
        } else {
                if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
                        DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
        }
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;

        if (enable)
                ironlake_enable_display_irq(dev_priv, bit);
        else
                ironlake_disable_display_irq(dev_priv, bit);
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe,
                                                  bool enable, bool old)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

                if (!ivb_can_enable_err_int(dev))
                        return;

                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

                if (old &&
                    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
                        DRM_ERROR("uncleared fifo underrun on pipe %c\n",
                                  pipe_name(pipe));
                }
        }
}
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        assert_spin_locked(&dev_priv->irq_lock);

        if (enable)
                dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
        else
                dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
        I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
        POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                         uint32_t interrupt_mask,
                                         uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(dev_priv->pm.irqs_disabled))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), 0)
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
                       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

        if (enable)
                ibx_enable_display_interrupt(dev_priv, bit);
        else
                ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable, bool old)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                I915_WRITE(SERR_INT,
                           SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

                if (!cpt_can_enable_serr_int(dev))
                        return;

                ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
        } else {
                ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

                if (old && I915_READ(SERR_INT) &
                    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
                        DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
                                  transcoder_name(pch_transcoder));
                }
        }
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                                    enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        bool old;

        assert_spin_locked(&dev_priv->irq_lock);

        old = !intel_crtc->cpu_fifo_underrun_disabled;
        intel_crtc->cpu_fifo_underrun_disabled = !enable;

        if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
                i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
        else if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
        else if (IS_GEN8(dev))
                broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

        return old;
}
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                           enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return ret;
}
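/*
 * Note: a hypothetical caller (not taken from this file) might use the
 * returned previous state to bracket an operation that is known to trigger
 * spurious underruns:
 *
 *      bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *      ... do the underrun-prone work ...
 *      intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */
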
static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
                                                  enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        return !intel_crtc->cpu_fifo_underrun_disabled;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           enum transcoder pch_transcoder,
                                           bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool old;

        /*
         * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
         * has only one pch transcoder A that all pipes can use. To avoid racy
         * pch transcoder -> pipe lookups from interrupt code simply store the
         * underrun statistics in crtc A. Since we never expose this anywhere
         * nor use it outside of the fifo underrun code here using the "wrong"
         * crtc on LPT won't cause issues.
         */

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        old = !intel_crtc->pch_fifo_underrun_disabled;
        intel_crtc->pch_fifo_underrun_disabled = !enable;

        if (HAS_PCH_IBX(dev))
                ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return old;
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                       u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == enable_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;

        /* Enable the interrupt, clear any pending status */
        pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                        u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

        pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
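        /*
         * PIPESTAT layout: the enable bits live in the high 16 bits and the
         * corresponding status bits in the low 16, so the common case is
         * simply status_mask << 16. The fixups below handle the bits whose
         * enable/status pairing doesn't follow that rule (the sprite flip
         * done bits, and the underrun bit which has no enable bit).
         */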
        u32 enable_mask = status_mask << 16;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit MBZ.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

        return enable_mask;
}
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                     u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                      u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                struct intel_crtc *intel_crtc =
                        to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                const struct drm_display_mode *mode =
                        &intel_crtc->config.adjusted_mode;

                htotal = mode->crtc_htotal;
                hsync_start = mode->crtc_hsync_start;
                vbl_start = mode->crtc_vblank_start;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        vbl_start = DIV_ROUND_UP(vbl_start, 2);
        } else {
                enum transcoder cpu_transcoder = (enum transcoder) pipe;

                htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
                hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
                vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
                if ((I915_READ(PIPECONF(cpu_transcoder)) &
                     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
                        vbl_start = DIV_ROUND_UP(vbl_start, 2);
        }

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;
        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
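        /*
         * I.e. once the pixel counter has passed vbl_start the hardware
         * frame counter still shows the old frame (it only increments at
         * the start of active), so adding 1 makes the cooked value behave
         * like a counter that increments at the start of vblank.
         */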
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN2(dev))
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    ktime_t *stime, ktime_t *etime)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        if (!intel_crtc->active) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. In order to avoid
                 * the reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                     int *max_error,
                                     struct timeval *vblank_time,
                                     unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc,
                                                     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
                                struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        if (old_status == connector->status)
                return false;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      connector->name,
                      drm_get_connector_status_name(old_status),
                      drm_get_connector_status_name(connector->status));

        return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, dig_port_work);
        unsigned long irqflags;
        u32 long_port_mask, short_port_mask;
        struct intel_digital_port *intel_dig_port;
        int i, ret;
        u32 old_bits = 0;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        long_port_mask = dev_priv->long_hpd_port_mask;
        dev_priv->long_hpd_port_mask = 0;
        short_port_mask = dev_priv->short_hpd_port_mask;
        dev_priv->short_hpd_port_mask = 0;
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        for (i = 0; i < I915_MAX_PORTS; i++) {
                bool valid = false;
                bool long_hpd = false;
                intel_dig_port = dev_priv->hpd_irq_port[i];
                if (!intel_dig_port || !intel_dig_port->hpd_pulse)
                        continue;

                if (long_port_mask & (1 << i)) {
                        valid = true;
                        long_hpd = true;
                } else if (short_port_mask & (1 << i))
                        valid = true;

                if (valid) {
                        ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
                        if (ret == true) {
                                /* if we get true fallback to old school hpd */
                                old_bits |= (1 << intel_dig_port->base.hpd_pin);
                        }
                }
        }

        if (old_bits) {
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                dev_priv->hpd_event_bits |= old_bits;
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
                schedule_work(&dev_priv->hotplug_work);
        }
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                 connector->name);
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      connector->name, intel_encoder->hpd_pin);
                }
        }
        /* if there were no outputs to poll, poll was disabled,
         * therefore make sure it's enabled when disabling HPD on
         * some connectors */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_timer(&dev_priv->hotplug_reenable_timer,
                          jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}
static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
        del_timer_sync(&dev_priv->hotplug_reenable_timer);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
                        struct intel_engine_cs *ring)
{
        if (!intel_ring_initialized(ring))
                return;

        trace_i915_gem_request_complete(ring);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                intel_notify_mmio_flip(ring);

        wake_up_all(&ring->irq_queue);
        i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
                            struct intel_rps_ei *rps_ei)
{
        u32 cz_ts, cz_freq_khz;
        u32 render_count, media_count;
        u32 elapsed_render, elapsed_media, elapsed_time;
        u32 residency = 0;

        cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
        cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

        render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
        media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

        if (rps_ei->cz_clock == 0) {
                rps_ei->cz_clock = cz_ts;
                rps_ei->render_c0 = render_count;
                rps_ei->media_c0 = media_count;

                return dev_priv->rps.cur_freq;
        }

        elapsed_time = cz_ts - rps_ei->cz_clock;
        rps_ei->cz_clock = cz_ts;

        elapsed_render = render_count - rps_ei->render_c0;
        rps_ei->render_c0 = render_count;

        elapsed_media = media_count - rps_ei->media_c0;
        rps_ei->media_c0 = media_count;

        /* Convert all the counters into common unit of milli sec */
        elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
        elapsed_render /= cz_freq_khz;
        elapsed_media /= cz_freq_khz;

        /*
         * Calculate overall C0 residency percentage
         * only if elapsed time is non zero
         */
        if (elapsed_time) {
                residency =
                        ((max(elapsed_render, elapsed_media) * 100)
                                / elapsed_time);
        }

        return residency;
}
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 */
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
        u32 residency_C0_up = 0, residency_C0_down = 0;
        int new_delay, adj;

        dev_priv->rps.ei_interrupt_count++;

        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

        if (dev_priv->rps.up_ei.cz_clock == 0) {
                vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
                vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
                return dev_priv->rps.cur_freq;
        }

        /*
         * To down throttle, C0 residency should be less than down threshold
         * for continuous EI intervals. So calculate down EI counters
         * once in VLV_INT_COUNT_FOR_DOWN_EI
         */
        if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
                dev_priv->rps.ei_interrupt_count = 0;

                residency_C0_down = vlv_c0_residency(dev_priv,
                                                     &dev_priv->rps.down_ei);
        } else {
                residency_C0_up = vlv_c0_residency(dev_priv,
                                                   &dev_priv->rps.up_ei);
        }

        new_delay = dev_priv->rps.cur_freq;

        adj = dev_priv->rps.last_adj;
        /* C0 residency is greater than UP threshold. Increase Frequency */
        if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else
                        adj = 1;

                if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
                        new_delay = dev_priv->rps.cur_freq + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;

        } else if (!dev_priv->rps.ei_interrupt_count &&
                   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
                if (adj < 0)
                        adj *= 2;
                else
                        adj = -1;
                /*
                 * This means, C0 residency is less than down threshold over
                 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
                 */
                if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
                        new_delay = dev_priv->rps.cur_freq + adj;
        }

        return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, rps.work);
        u32 pm_iir;
        int new_delay, adj;

        spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        if (IS_BROADWELL(dev_priv->dev))
                bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        else {
                /* Make sure not to corrupt PMIMR state used by ringbuffer */
                snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

        if ((pm_iir & dev_priv->pm_rps_events) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        adj = dev_priv->rps.last_adj;
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else {
                        /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
                }
                new_delay = dev_priv->rps.cur_freq + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
                else
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
                new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else {
                        /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
                }
                new_delay = dev_priv->rps.cur_freq + adj;
        } else { /* unknown event */
                new_delay = dev_priv->rps.cur_freq;
                adj = 0;
        }

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay = clamp_t(int, new_delay,
                            dev_priv->rps.min_freq_softlimit,
                            dev_priv->rps.max_freq_softlimit);

        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

        if (IS_VALLEYVIEW(dev_priv->dev))
                valleyview_set_rps(dev_priv->dev, new_delay);
        else
                gen6_set_rps(dev_priv->dev, new_delay);

        mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        unsigned long flags;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                u32 reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1 + (slice * 0x200);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_L3_DPF(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
                i915_handle_error(dev, false, "GT error interrupt 0x%08x",
                                  gt_iir);
        }

        if (gt_iir & GT_PARITY_ERROR(dev))
                ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        if ((pm_iir & dev_priv->pm_rps_events) == 0)
                return;

        spin_lock(&dev_priv->irq_lock);
        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
        bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
        spin_unlock(&dev_priv->irq_lock);

        queue_work(dev_priv->wq, &dev_priv->rps.work);
}
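/*
 * Gen8 splits the GT interrupts into four IIR banks: GEN8_GT_IIR(0) carries
 * the render and blitter engines, GEN8_GT_IIR(1) the two video decode
 * engines, GEN8_GT_IIR(2) the PM/RPS events and GEN8_GT_IIR(3) the video
 * enhancement engine. Each engine's bits sit at its *_IRQ_SHIFT within the
 * bank, which is what the handler below decodes.
 */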
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
                                       struct drm_i915_private *dev_priv,
                                       u32 master_ctl)
{
        u32 rcs, bcs, vcs;
        uint32_t tmp = 0;
        irqreturn_t ret = IRQ_NONE;

        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                tmp = I915_READ(GEN8_GT_IIR(0));
                if (tmp) {
                        I915_WRITE(GEN8_GT_IIR(0), tmp);
                        ret = IRQ_HANDLED;
                        rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
                        bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
                        if (rcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, &dev_priv->ring[RCS]);
                        if (bcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, &dev_priv->ring[BCS]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }

        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
                tmp = I915_READ(GEN8_GT_IIR(1));
                if (tmp) {
                        I915_WRITE(GEN8_GT_IIR(1), tmp);
                        ret = IRQ_HANDLED;
                        vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, &dev_priv->ring[VCS]);
                        vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, &dev_priv->ring[VCS2]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }

        if (master_ctl & GEN8_GT_PM_IRQ) {
                tmp = I915_READ(GEN8_GT_IIR(2));
                if (tmp & dev_priv->pm_rps_events) {
                        I915_WRITE(GEN8_GT_IIR(2),
                                   tmp & dev_priv->pm_rps_events);
                        ret = IRQ_HANDLED;
                        gen8_rps_irq_handler(dev_priv, tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (PM)!\n");
        }

        if (master_ctl & GEN8_GT_VECS_IRQ) {
                tmp = I915_READ(GEN8_GT_IIR(3));
                if (tmp) {
                        I915_WRITE(GEN8_GT_IIR(3), tmp);
                        ret = IRQ_HANDLED;
                        vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
                        if (vcs & GT_RENDER_USER_INTERRUPT)
                                notify_ring(dev, &dev_priv->ring[VECS]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
        }

        return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
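/*
 * Storm detection policy (see intel_hpd_irq_handler() below): more than
 * HPD_STORM_THRESHOLD interrupts on one pin within HPD_STORM_DETECT_PERIOD
 * ms mark the pin as storming; it is then dropped from interrupt-driven
 * detection, the hotplug work switches the connector to polling, and the
 * interrupt is re-enabled after I915_REENABLE_HOTPLUG_DELAY.
 */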
static int ilk_port_to_hotplug_shift(enum port port)
{
        switch (port) {
        case PORT_A:
        case PORT_E:
        default:
                return -1;
        case PORT_B:
                return 0;
        case PORT_C:
                return 8;
        case PORT_D:
                return 16;
        }
}

static int g4x_port_to_hotplug_shift(enum port port)
{
        switch (port) {
        case PORT_A:
        case PORT_E:
        default:
                return -1;
        case PORT_B:
                return 17;
        case PORT_C:
                return 19;
        case PORT_D:
                return 21;
        }
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
        switch (pin) {
        case HPD_PORT_B:
                return PORT_B;
        case HPD_PORT_C:
                return PORT_C;
        case HPD_PORT_D:
                return PORT_D;
        default:
                return PORT_A; /* no hpd */
        }
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                         u32 hotplug_trigger,
                                         u32 dig_hotplug_reg,
                                         const u32 *hpd)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
        enum port port;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
        u32 dig_shift;
        u32 dig_port_mask = 0;

        if (!hotplug_trigger)
                return;

        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
                         hotplug_trigger, dig_hotplug_reg);

        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
                if (!(hpd[i] & hotplug_trigger))
                        continue;

                port = get_port_from_pin(i);
                if (port && dev_priv->hpd_irq_port[port]) {
                        bool long_hpd;

                        if (IS_G4X(dev)) {
                                dig_shift = g4x_port_to_hotplug_shift(port);
                                long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
                        } else {
                                dig_shift = ilk_port_to_hotplug_shift(port);
                                long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
                        }

                        DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
                        /* for long HPD pulses we want to have the digital queue happen,
                           but we still want HPD storm detection to function. */
                        if (long_hpd) {
                                dev_priv->long_hpd_port_mask |= (1 << port);
                                dig_port_mask |= hpd[i];
                        } else {
                                /* for short HPD just trigger the digital queue */
                                dev_priv->short_hpd_port_mask |= (1 << port);
                                hotplug_trigger &= ~hpd[i];
                        }
                        queue_dig = true;
                }
        }

        for (i = 1; i < HPD_NUM_PINS; i++) {
                if (hpd[i] & hotplug_trigger &&
                    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
                        /*
                         * On GMCH platforms the interrupt mask bits only
                         * prevent irq generation, not the setting of the
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
                        WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
                                  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
                                  hotplug_trigger, i, hpd[i]);

                        continue;
                }

                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;

                if (!(dig_port_mask & hpd[i])) {
                        dev_priv->hpd_event_bits |= (1 << i);
                        queue_hp = true;
                }

                if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
                        DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
                        storm_detected = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
                                      dev_priv->hpd_stats[i].hpd_cnt);
                }
        }

        if (storm_detected)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev-priv->wq work
         * queue for otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        if (queue_dig)
                queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
        if (queue_hp)
                schedule_work(&dev_priv->hotplug_work);
}
static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                                         uint32_t crc0, uint32_t crc1,
                                         uint32_t crc2, uint32_t crc3,
                                         uint32_t crc4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_pipe_crc_entry *entry;
        int head, tail;

        spin_lock(&pipe_crc->lock);

        if (!pipe_crc->entries) {
                spin_unlock(&pipe_crc->lock);
                DRM_ERROR("spurious interrupt\n");
                return;
        }

        head = pipe_crc->head;
        tail = pipe_crc->tail;
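        /*
         * The entries array is used as a power-of-two ring buffer: both
         * CIRC_SPACE() and the "& (INTEL_PIPE_CRC_ENTRIES_NR - 1)" head
         * advance below rely on INTEL_PIPE_CRC_ENTRIES_NR being a power
         * of two.
         */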
        if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
                spin_unlock(&pipe_crc->lock);
                DRM_ERROR("CRC buffer overflowing\n");
                return;
        }

        entry = &pipe_crc->entries[head];

        entry->frame = dev->driver->get_vblank_counter(dev, pipe);
        entry->crc[0] = crc0;
        entry->crc[1] = crc1;
        entry->crc[2] = crc2;
        entry->crc[3] = crc3;
        entry->crc[4] = crc4;

        head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
        pipe_crc->head = head;

        spin_unlock(&pipe_crc->lock);

        wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                             uint32_t crc0, uint32_t crc1,
                             uint32_t crc2, uint32_t crc3,
                             uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
}
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t res1, res2;

        if (INTEL_INFO(dev)->gen >= 3)
                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;

        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
                                     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        if (pm_iir & dev_priv->pm_rps_events) {
                spin_lock(&dev_priv->irq_lock);
                dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
                snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
                spin_unlock(&dev_priv->irq_lock);

                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }

        if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
                        i915_handle_error(dev_priv->dev, false,
                                          "VEBOX CS error interrupt 0x%08x",
                                          pm_iir);
                }
        }
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
        struct intel_crtc *crtc;

        if (!drm_handle_vblank(dev, pipe))
                return false;

        crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
        wake_up(&crtc->vbl_wait);

        return true;
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pipe_stats[I915_MAX_PIPES] = { };
        int pipe;

        spin_lock(&dev_priv->irq_lock);
        for_each_pipe(pipe) {
                int reg;
                u32 mask, iir_bit = 0;

                /*
                 * PIPESTAT bits get signalled even when the interrupt is
                 * disabled with the mask bits, and some of the status bits do
                 * not generate interrupts at all (like the underrun bit). Hence
                 * we need to be careful that we only handle what we want to
                 * handle.
                 */
                mask = 0;
                if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
                        mask |= PIPE_FIFO_UNDERRUN_STATUS;

                switch (pipe) {
                case PIPE_A:
                        iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
                        break;
                case PIPE_B:
                        iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
                        break;
                case PIPE_C:
                        iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
                        break;
                }
                if (iir & iir_bit)
                        mask |= dev_priv->pipestat_irq_mask[pipe];

                if (!mask)
                        continue;

                reg = PIPESTAT(pipe);
                mask |= PIPESTAT_INT_ENABLE_MASK;
                pipe_stats[pipe] = I915_READ(reg) & mask;

                /*
                 * Clear the PIPE*STAT regs before the IIR
                 */
                if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
                                        PIPESTAT_INT_STATUS_MASK))
                        I915_WRITE(reg, pipe_stats[pipe]);
        }
        spin_unlock(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
                        intel_pipe_handle_vblank(dev, pipe);

                if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
                        intel_prepare_page_flip(dev, pipe);
                        intel_finish_page_flip(dev, pipe);
                }

                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                        i9xx_pipe_crc_irq_handler(dev, pipe);

                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
                    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
                        DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
        }

        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                gmbus_irq_handler(dev);
}
2076 static void i9xx_hpd_irq_handler(struct drm_device *dev)
2078 struct drm_i915_private *dev_priv = dev->dev_private;
2079 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2081 if (hotplug_status) {
2082 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2084 * Make sure hotplug status is cleared before we clear IIR, or else we
2085 * may miss hotplug events.
2087 POSTING_READ(PORT_HOTPLUG_STAT);
2090 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2092 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
2094 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2096 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
2099 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
2100 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2101 dp_aux_irq_handler(dev);
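/*
 * Top-level vlv interrupt handler: each pass samples GTIIR, GEN6_PMIIR
 * and VLV_IIR, clears them, and dispatches to the GT, RPS and display
 * pipestat handlers until all three read back zero.
 */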
2105 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2107 struct drm_device *dev = arg;
2108 struct drm_i915_private *dev_priv = dev->dev_private;
2109 u32 iir, gt_iir, pm_iir;
2110 irqreturn_t ret = IRQ_NONE;
2113 /* Find, clear, then process each source of interrupt */
2115 gt_iir = I915_READ(GTIIR);
2117 I915_WRITE(GTIIR, gt_iir);
2119 pm_iir = I915_READ(GEN6_PMIIR);
2121 I915_WRITE(GEN6_PMIIR, pm_iir);
2123 iir = I915_READ(VLV_IIR);
2125 /* Consume port before clearing IIR or we'll miss events */
2126 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2127 i9xx_hpd_irq_handler(dev);
2128 I915_WRITE(VLV_IIR, iir);
2131 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2137 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2139 gen6_rps_irq_handler(dev_priv, pm_iir);
2140 /* Call regardless, as some status bits might not be
2141 * signalled in iir */
2142 valleyview_pipestat_irq_handler(dev, iir);
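/*
 * Top-level chv interrupt handler: like vlv, but gated by
 * GEN8_MASTER_IRQ, with GT events decoded by the gen8 handler.
 */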
2149 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2151 struct drm_device *dev = arg;
2152 struct drm_i915_private *dev_priv = dev->dev_private;
2153 u32 master_ctl, iir;
2154 irqreturn_t ret = IRQ_NONE;
2157 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2158 iir = I915_READ(VLV_IIR);
2160 if (master_ctl == 0 && iir == 0)
2165 I915_WRITE(GEN8_MASTER_IRQ, 0);
2167 /* Find, clear, then process each source of interrupt */
2170 /* Consume port before clearing IIR or we'll miss events */
2171 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2172 i9xx_hpd_irq_handler(dev);
2173 I915_WRITE(VLV_IIR, iir);
2176 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2178 /* Call regardless, as some status bits might not be
2179 * signalled in iir */
2180 valleyview_pipestat_irq_handler(dev, iir);
2182 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2183 POSTING_READ(GEN8_MASTER_IRQ);
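/*
 * Decode south display engine (IBX PCH) interrupts: digital hotplug,
 * audio power changes, DP AUX, GMBUS, poison, FDI and per-transcoder
 * CRC and FIFO underrun events.
 */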
2189 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2191 struct drm_i915_private *dev_priv = dev->dev_private;
2193 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2194 u32 dig_hotplug_reg;
2196 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2197 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2199 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2201 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2202 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2203 SDE_AUDIO_POWER_SHIFT);
2204 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2208 if (pch_iir & SDE_AUX_MASK)
2209 dp_aux_irq_handler(dev);
2211 if (pch_iir & SDE_GMBUS)
2212 gmbus_irq_handler(dev);
2214 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2215 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2217 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2218 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2220 if (pch_iir & SDE_POISON)
2221 DRM_ERROR("PCH poison interrupt\n");
2223 if (pch_iir & SDE_FDI_MASK)
2225 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2227 I915_READ(FDI_RX_IIR(pipe)));
2229 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2230 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2232 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2233 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2235 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2236 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2238 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2240 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2241 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2243 DRM_ERROR("PCH transcoder B FIFO underrun\n");
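/*
 * Decode GEN7_ERR_INT: poison plus per-pipe FIFO underrun and CRC-done
 * events (routed to the ivb or hsw CRC handler), then clear the
 * register.
 */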
2246 static void ivb_err_int_handler(struct drm_device *dev)
2248 struct drm_i915_private *dev_priv = dev->dev_private;
2249 u32 err_int = I915_READ(GEN7_ERR_INT);
2252 if (err_int & ERR_INT_POISON)
2253 DRM_ERROR("Poison interrupt\n");
2255 for_each_pipe(pipe) {
2256 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2257 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2259 DRM_ERROR("Pipe %c FIFO underrun\n",
2263 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2264 if (IS_IVYBRIDGE(dev))
2265 ivb_pipe_crc_irq_handler(dev, pipe);
2267 hsw_pipe_crc_irq_handler(dev, pipe);
2271 I915_WRITE(GEN7_ERR_INT, err_int);
2274 static void cpt_serr_int_handler(struct drm_device *dev)
2276 struct drm_i915_private *dev_priv = dev->dev_private;
2277 u32 serr_int = I915_READ(SERR_INT);
2279 if (serr_int & SERR_INT_POISON)
2280 DRM_ERROR("PCH poison interrupt\n");
2282 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2283 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2285 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2287 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2288 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2290 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2292 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2293 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2295 DRM_ERROR("PCH transcoder C FIFO underrun\n");
2297 I915_WRITE(SERR_INT, serr_int);
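/*
 * CPT/PPT variant of the south interrupt decode; error conditions are
 * handed off to cpt_serr_int_handler() above.
 */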
2300 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2302 struct drm_i915_private *dev_priv = dev->dev_private;
2304 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2305 u32 dig_hotplug_reg;
2307 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2308 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2310 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2312 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2313 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2314 SDE_AUDIO_POWER_SHIFT_CPT);
2315 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2319 if (pch_iir & SDE_AUX_MASK_CPT)
2320 dp_aux_irq_handler(dev);
2322 if (pch_iir & SDE_GMBUS_CPT)
2323 gmbus_irq_handler(dev);
2325 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2326 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2328 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2329 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2331 if (pch_iir & SDE_FDI_MASK_CPT)
2333 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2335 I915_READ(FDI_RX_IIR(pipe)));
2337 if (pch_iir & SDE_ERROR_CPT)
2338 cpt_serr_int_handler(dev);
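/*
 * Decode the ilk/snb display engine IIR: AUX channel A, opregion ASLE,
 * poison, per-pipe vblank/underrun/CRC/flip events, chained PCH
 * interrupts (SDEIIR is read and dispatched before being cleared) and,
 * on gen5, PCU/RPS change events.
 */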
2341 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2343 struct drm_i915_private *dev_priv = dev->dev_private;
2346 if (de_iir & DE_AUX_CHANNEL_A)
2347 dp_aux_irq_handler(dev);
2349 if (de_iir & DE_GSE)
2350 intel_opregion_asle_intr(dev);
2352 if (de_iir & DE_POISON)
2353 DRM_ERROR("Poison interrupt\n");
2355 for_each_pipe(pipe) {
2356 if (de_iir & DE_PIPE_VBLANK(pipe))
2357 intel_pipe_handle_vblank(dev, pipe);
2359 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2360 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2361 DRM_ERROR("Pipe %c FIFO underrun\n",
2364 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2365 i9xx_pipe_crc_irq_handler(dev, pipe);
2367 /* plane/pipes map 1:1 on ilk+ */
2368 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2369 intel_prepare_page_flip(dev, pipe);
2370 intel_finish_page_flip_plane(dev, pipe);
2374 /* check event from PCH */
2375 if (de_iir & DE_PCH_EVENT) {
2376 u32 pch_iir = I915_READ(SDEIIR);
2378 if (HAS_PCH_CPT(dev))
2379 cpt_irq_handler(dev, pch_iir);
2381 ibx_irq_handler(dev, pch_iir);
2383 /* should clear PCH hotplug event before clearing CPU irq */
2384 I915_WRITE(SDEIIR, pch_iir);
2387 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2388 ironlake_rps_change_irq_handler(dev);
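/* ivb/hsw variant of the display engine IIR decode. */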
2391 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2393 struct drm_i915_private *dev_priv = dev->dev_private;
2396 if (de_iir & DE_ERR_INT_IVB)
2397 ivb_err_int_handler(dev);
2399 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2400 dp_aux_irq_handler(dev);
2402 if (de_iir & DE_GSE_IVB)
2403 intel_opregion_asle_intr(dev);
2405 for_each_pipe(pipe) {
2406 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2407 intel_pipe_handle_vblank(dev, pipe);
2409 /* plane/pipes map 1:1 on ilk+ */
2410 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2411 intel_prepare_page_flip(dev, pipe);
2412 intel_finish_page_flip_plane(dev, pipe);
2416 /* check event from PCH */
2417 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2418 u32 pch_iir = I915_READ(SDEIIR);
2420 cpt_irq_handler(dev, pch_iir);
2422 /* clear PCH hotplug event before clearing CPU irq */
2423 I915_WRITE(SDEIIR, pch_iir);
2428 * To handle irqs with the minimum potential races with fresh interrupts, we:
2429 * 1 - Disable Master Interrupt Control.
2430 * 2 - Find the source(s) of the interrupt.
2431 * 3 - Clear the Interrupt Identity bits (IIR).
2432 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2433 * 5 - Re-enable Master Interrupt Control.
2435 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2437 struct drm_device *dev = arg;
2438 struct drm_i915_private *dev_priv = dev->dev_private;
2439 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2440 irqreturn_t ret = IRQ_NONE;
2442 /* We get interrupts on unclaimed registers, so check for this before we
2443 * do any I915_{READ,WRITE}. */
2444 intel_uncore_check_errors(dev);
2446 /* disable master interrupt before clearing iir */
2447 de_ier = I915_READ(DEIER);
2448 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2449 POSTING_READ(DEIER);
2451 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2452 * interrupts will be stored on its back queue, and then we'll be
2453 * able to process them after we restore SDEIER (as soon as we restore
2454 * it, we'll get an interrupt if SDEIIR still has something to process
2455 * due to its back queue). */
2456 if (!HAS_PCH_NOP(dev)) {
2457 sde_ier = I915_READ(SDEIER);
2458 I915_WRITE(SDEIER, 0);
2459 POSTING_READ(SDEIER);
2462 /* Find, clear, then process each source of interrupt */
2464 gt_iir = I915_READ(GTIIR);
2466 I915_WRITE(GTIIR, gt_iir);
2468 if (INTEL_INFO(dev)->gen >= 6)
2469 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2471 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2474 de_iir = I915_READ(DEIIR);
2476 I915_WRITE(DEIIR, de_iir);
2478 if (INTEL_INFO(dev)->gen >= 7)
2479 ivb_display_irq_handler(dev, de_iir);
2481 ilk_display_irq_handler(dev, de_iir);
2484 if (INTEL_INFO(dev)->gen >= 6) {
2485 u32 pm_iir = I915_READ(GEN6_PMIIR);
2487 I915_WRITE(GEN6_PMIIR, pm_iir);
2489 gen6_rps_irq_handler(dev_priv, pm_iir);
2493 I915_WRITE(DEIER, de_ier);
2494 POSTING_READ(DEIER);
2495 if (!HAS_PCH_NOP(dev)) {
2496 I915_WRITE(SDEIER, sde_ier);
2497 POSTING_READ(SDEIER);
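/*
 * Gen8 top-level handler: master interrupt control is disabled up
 * front, each asserted category (GT, DE misc/port, per-pipe DE, PCH)
 * is cleared and then dispatched, and master control is re-enabled at
 * the end. A category bit set with an empty IIR is flagged as a lie.
 */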
2503 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2505 struct drm_device *dev = arg;
2506 struct drm_i915_private *dev_priv = dev->dev_private;
2508 irqreturn_t ret = IRQ_NONE;
2512 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2513 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2517 I915_WRITE(GEN8_MASTER_IRQ, 0);
2518 POSTING_READ(GEN8_MASTER_IRQ);
2520 /* Find, clear, then process each source of interrupt */
2522 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2524 if (master_ctl & GEN8_DE_MISC_IRQ) {
2525 tmp = I915_READ(GEN8_DE_MISC_IIR);
2527 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2529 if (tmp & GEN8_DE_MISC_GSE)
2530 intel_opregion_asle_intr(dev);
2532 DRM_ERROR("Unexpected DE Misc interrupt\n");
2535 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2538 if (master_ctl & GEN8_DE_PORT_IRQ) {
2539 tmp = I915_READ(GEN8_DE_PORT_IIR);
2541 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2543 if (tmp & GEN8_AUX_CHANNEL_A)
2544 dp_aux_irq_handler(dev);
2546 DRM_ERROR("Unexpected DE Port interrupt\n");
2549 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2552 for_each_pipe(pipe) {
2555 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2558 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2561 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2562 if (pipe_iir & GEN8_PIPE_VBLANK)
2563 intel_pipe_handle_vblank(dev, pipe);
2565 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2566 intel_prepare_page_flip(dev, pipe);
2567 intel_finish_page_flip_plane(dev, pipe);
2570 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2571 hsw_pipe_crc_irq_handler(dev, pipe);
2573 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2574 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2576 DRM_ERROR("Pipe %c FIFO underrun\n",
2580 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2581 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2583 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2586 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2589 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2591 * FIXME(BDW): Assume for now that the new interrupt handling
2592 * scheme also closed the SDE interrupt handling race we've seen
2593 * on older pch-split platforms. But this needs testing.
2595 u32 pch_iir = I915_READ(SDEIIR);
2597 I915_WRITE(SDEIIR, pch_iir);
2599 cpt_irq_handler(dev, pch_iir);
2601 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2605 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2606 POSTING_READ(GEN8_MASTER_IRQ);
2611 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2612 bool reset_completed)
2614 struct intel_engine_cs *ring;
2618 * Notify all waiters for GPU completion events that reset state has
2619 * been changed, and that they need to restart their wait after
2620 * checking for potential errors (and bail out to drop locks if there is
2621 * a gpu reset pending so that i915_error_work_func can acquire them).
2624 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2625 for_each_ring(ring, dev_priv, i)
2626 wake_up_all(&ring->irq_queue);
2628 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2629 wake_up_all(&dev_priv->pending_flip_queue);
2632 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2633 * reset state is cleared.
2635 if (reset_completed)
2636 wake_up_all(&dev_priv->gpu_error.reset_queue);
2640 * i915_error_work_func - do process context error handling work
2641 * @work: work struct
2643 * Fire an error uevent so userspace can see that a hang or error was detected.
2646 static void i915_error_work_func(struct work_struct *work)
2648 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2650 struct drm_i915_private *dev_priv =
2651 container_of(error, struct drm_i915_private, gpu_error);
2652 struct drm_device *dev = dev_priv->dev;
2653 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2654 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2655 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2658 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2661 * Note that there's only one work item which does gpu resets, so we
2662 * need not worry about concurrent gpu resets potentially incrementing
2663 * error->reset_counter twice. We only need to take care of another
2664 * racing irq/hangcheck declaring the gpu dead for a second time. A
2665 * quick check for that is good enough: schedule_work ensures the
2666 * correct ordering between hang detection and this work item, and since
2667 * the reset in-progress bit is only ever set by code outside of this
2668 * work we don't need to worry about any other races.
2670 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2671 DRM_DEBUG_DRIVER("resetting chip\n");
2672 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2676 * In most cases it's guaranteed that we get here with an RPM
2677 * reference held, for example because there is a pending GPU
2678 * request that won't finish until the reset is done. This
2679 * isn't the case at least when we get here by doing a
2680 * simulated reset via debugfs, so get an RPM reference.
2682 intel_runtime_pm_get(dev_priv);
2684 * All state reset _must_ be completed before we update the
2685 * reset counter, for otherwise waiters might miss the reset
2686 * pending state and not properly drop locks, resulting in
2687 * deadlocks with the reset work.
2689 ret = i915_reset(dev);
2691 intel_display_handle_reset(dev);
2693 intel_runtime_pm_put(dev_priv);
2697 * After all the gem state is reset, increment the reset
2698 * counter and wake up everyone waiting for the reset to
2701 * Since unlock operations are a one-sided barrier only,
2702 * we need to insert a barrier here to order any seqno updates before
2704 * the counter increment.
2706 smp_mb__before_atomic();
2707 atomic_inc(&dev_priv->gpu_error.reset_counter);
2709 kobject_uevent_env(&dev->primary->kdev->kobj,
2710 KOBJ_CHANGE, reset_done_event);
2712 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2716 * Note: The wake_up also serves as a memory barrier so that
2717 * waiters see the updated value of the reset counter atomic_t.
2719 i915_error_wake_up(dev_priv, true);
2723 static void i915_report_and_clear_eir(struct drm_device *dev)
2725 struct drm_i915_private *dev_priv = dev->dev_private;
2726 uint32_t instdone[I915_NUM_INSTDONE_REG];
2727 u32 eir = I915_READ(EIR);
2733 pr_err("render error detected, EIR: 0x%08x\n", eir);
2735 i915_get_extra_instdone(dev, instdone);
2738 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2739 u32 ipeir = I915_READ(IPEIR_I965);
2741 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2742 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2743 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2744 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2745 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2746 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2747 I915_WRITE(IPEIR_I965, ipeir);
2748 POSTING_READ(IPEIR_I965);
2750 if (eir & GM45_ERROR_PAGE_TABLE) {
2751 u32 pgtbl_err = I915_READ(PGTBL_ER);
2752 pr_err("page table error\n");
2753 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2754 I915_WRITE(PGTBL_ER, pgtbl_err);
2755 POSTING_READ(PGTBL_ER);
2759 if (!IS_GEN2(dev)) {
2760 if (eir & I915_ERROR_PAGE_TABLE) {
2761 u32 pgtbl_err = I915_READ(PGTBL_ER);
2762 pr_err("page table error\n");
2763 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2764 I915_WRITE(PGTBL_ER, pgtbl_err);
2765 POSTING_READ(PGTBL_ER);
2769 if (eir & I915_ERROR_MEMORY_REFRESH) {
2770 pr_err("memory refresh error:\n");
2772 pr_err("pipe %c stat: 0x%08x\n",
2773 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2774 /* pipestat has already been acked */
2776 if (eir & I915_ERROR_INSTRUCTION) {
2777 pr_err("instruction error\n");
2778 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2779 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2780 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2781 if (INTEL_INFO(dev)->gen < 4) {
2782 u32 ipeir = I915_READ(IPEIR);
2784 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2785 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2786 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2787 I915_WRITE(IPEIR, ipeir);
2788 POSTING_READ(IPEIR);
2790 u32 ipeir = I915_READ(IPEIR_I965);
2792 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2793 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2794 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2795 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2796 I915_WRITE(IPEIR_I965, ipeir);
2797 POSTING_READ(IPEIR_I965);
2801 I915_WRITE(EIR, eir);
2803 eir = I915_READ(EIR);
2806 * some errors might have become stuck, so mask them.
2809 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2810 I915_WRITE(EMR, I915_READ(EMR) | eir);
2811 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2816 * i915_handle_error - handle an error interrupt
2819 * Do some basic checking of register state at error interrupt time and
2820 * dump it to the syslog. Also call i915_capture_error_state() to make
2821 * sure we get a record and make it available in debugfs. Fire a uevent
2822 * so userspace knows something bad happened (should trigger collection
2823 * of a ring dump etc.).
2825 void i915_handle_error(struct drm_device *dev, bool wedged,
2826 const char *fmt, ...)
2828 struct drm_i915_private *dev_priv = dev->dev_private;
2832 va_start(args, fmt);
2833 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2836 i915_capture_error_state(dev, wedged, error_msg);
2837 i915_report_and_clear_eir(dev);
2840 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2841 &dev_priv->gpu_error.reset_counter);
2844 * Wake up waiting processes so that the reset work function
2845 * i915_error_work_func doesn't deadlock trying to grab various
2846 * locks. By bumping the reset counter first, the woken
2847 * processes will see a reset in progress and back off,
2848 * releasing their locks and then waiting for the reset completion.
2849 * We must do this for _all_ gpu waiters that might hold locks
2850 * that the reset work needs to acquire.
2852 * Note: The wake_up serves as the required memory barrier to
2853 * ensure that the waiters see the updated value of the reset
2856 i915_error_wake_up(dev_priv, false);
2860 * Our reset work can grab modeset locks (since it needs to reset the
2861 * state of outstanding pageflips). Hence it must not be run on our own
2862 * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
2863 * code will deadlock.
2865 schedule_work(&dev_priv->gpu_error.work);
2868 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2870 struct drm_i915_private *dev_priv = dev->dev_private;
2871 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2872 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2873 struct drm_i915_gem_object *obj;
2874 struct intel_unpin_work *work;
2875 unsigned long flags;
2876 bool stall_detected;
2878 /* Ignore early vblank irqs */
2879 if (intel_crtc == NULL)
2882 spin_lock_irqsave(&dev->event_lock, flags);
2883 work = intel_crtc->unpin_work;
2886 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2887 !work->enable_stall_check) {
2888 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2889 spin_unlock_irqrestore(&dev->event_lock, flags);
2893 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2894 obj = work->pending_flip_obj;
2895 if (INTEL_INFO(dev)->gen >= 4) {
2896 int dspsurf = DSPSURF(intel_crtc->plane);
2897 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2898 i915_gem_obj_ggtt_offset(obj);
2900 int dspaddr = DSPADDR(intel_crtc->plane);
2901 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2902 crtc->y * crtc->primary->fb->pitches[0] +
2903 crtc->x * crtc->primary->fb->bits_per_pixel/8);
2906 spin_unlock_irqrestore(&dev->event_lock, flags);
2908 if (stall_detected) {
2909 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2910 intel_prepare_page_flip(dev, intel_crtc->plane);
2914 /* Called from drm generic code, passed 'crtc' which
2915 * we use as a pipe index
2917 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2919 struct drm_i915_private *dev_priv = dev->dev_private;
2920 unsigned long irqflags;
2922 if (!i915_pipe_enabled(dev, pipe))
2925 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2926 if (INTEL_INFO(dev)->gen >= 4)
2927 i915_enable_pipestat(dev_priv, pipe,
2928 PIPE_START_VBLANK_INTERRUPT_STATUS);
2930 i915_enable_pipestat(dev_priv, pipe,
2931 PIPE_VBLANK_INTERRUPT_STATUS);
2932 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2937 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2939 struct drm_i915_private *dev_priv = dev->dev_private;
2940 unsigned long irqflags;
2941 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2942 DE_PIPE_VBLANK(pipe);
2944 if (!i915_pipe_enabled(dev, pipe))
2947 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2948 ironlake_enable_display_irq(dev_priv, bit);
2949 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2954 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2956 struct drm_i915_private *dev_priv = dev->dev_private;
2957 unsigned long irqflags;
2959 if (!i915_pipe_enabled(dev, pipe))
2962 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2963 i915_enable_pipestat(dev_priv, pipe,
2964 PIPE_START_VBLANK_INTERRUPT_STATUS);
2965 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2970 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2972 struct drm_i915_private *dev_priv = dev->dev_private;
2973 unsigned long irqflags;
2975 if (!i915_pipe_enabled(dev, pipe))
2978 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2979 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2980 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2981 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2982 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2986 /* Called from drm generic code, passed 'crtc' which
2987 * we use as a pipe index
2989 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2991 struct drm_i915_private *dev_priv = dev->dev_private;
2992 unsigned long irqflags;
2994 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2995 i915_disable_pipestat(dev_priv, pipe,
2996 PIPE_VBLANK_INTERRUPT_STATUS |
2997 PIPE_START_VBLANK_INTERRUPT_STATUS);
2998 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3001 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
3003 struct drm_i915_private *dev_priv = dev->dev_private;
3004 unsigned long irqflags;
3005 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
3006 DE_PIPE_VBLANK(pipe);
3008 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3009 ironlake_disable_display_irq(dev_priv, bit);
3010 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3013 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
3015 struct drm_i915_private *dev_priv = dev->dev_private;
3016 unsigned long irqflags;
3018 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3019 i915_disable_pipestat(dev_priv, pipe,
3020 PIPE_START_VBLANK_INTERRUPT_STATUS);
3021 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3024 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
3026 struct drm_i915_private *dev_priv = dev->dev_private;
3027 unsigned long irqflags;
3029 if (!i915_pipe_enabled(dev, pipe))
3032 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3033 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
3034 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3035 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
3036 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3040 ring_last_seqno(struct intel_engine_cs *ring)
3042 return list_entry(ring->request_list.prev,
3043 struct drm_i915_gem_request, list)->seqno;
3047 ring_idle(struct intel_engine_cs *ring, u32 seqno)
3049 return (list_empty(&ring->request_list) ||
3050 i915_seqno_passed(seqno, ring_last_seqno(ring)));
3054 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
3056 if (INTEL_INFO(dev)->gen >= 8) {
3057 return (ipehr >> 23) == 0x1c;
3059 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
3060 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
3061 MI_SEMAPHORE_REGISTER);
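/*
 * Map a semaphore wait back to the engine expected to signal it: on
 * gen8+ by matching the wait offset against each ring's signal_ggtt
 * slot, on earlier gens by matching the mbox sync bits from IPEHR.
 */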
3065 static struct intel_engine_cs *
3066 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
3068 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3069 struct intel_engine_cs *signaller;
3072 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
3073 for_each_ring(signaller, dev_priv, i) {
3074 if (ring == signaller)
3077 if (offset == signaller->semaphore.signal_ggtt[ring->id])
3081 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
3083 for_each_ring(signaller, dev_priv, i) {
3084 if (ring == signaller)
3087 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
3092 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
3093 ring->id, ipehr, offset);
3098 static struct intel_engine_cs *
3099 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3101 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3102 u32 cmd, ipehr, head;
3106 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
3107 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
3111 * HEAD is likely pointing to the dword after the actual command,
3112 * so scan backwards until we find the MBOX. But limit it to just 3
3113 * or 4 dwords depending on the semaphore wait command size.
3114 * Note that we don't care about ACTHD here since that might
3115 * point at a batch, and semaphores are always emitted into the
3116 * ringbuffer itself.
3118 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3119 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3121 for (i = backwards; i; --i) {
3123 * Be paranoid and presume the hw has gone off into the wild -
3124 * our ring is smaller than what the hardware (and hence
3125 * HEAD_ADDR) allows. Also handles wrap-around.
3127 head &= ring->buffer->size - 1;
3129 /* This here seems to blow up */
3130 cmd = ioread32(ring->buffer->virtual_start + head);
3140 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
3141 if (INTEL_INFO(ring->dev)->gen >= 8) {
3142 offset = ioread32(ring->buffer->virtual_start + head + 12);
3144 offset = ioread32(ring->buffer->virtual_start + head + 8);
3146 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
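/*
 * Check whether the semaphore @ring is stuck on has been signalled.
 * Returns negative on a suspected or unresolvable deadlock; the
 * per-ring deadlock counter bounds the recursion.
 */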
3149 static int semaphore_passed(struct intel_engine_cs *ring)
3151 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3152 struct intel_engine_cs *signaller;
3155 ring->hangcheck.deadlock++;
3157 signaller = semaphore_waits_for(ring, &seqno);
3158 if (signaller == NULL)
3161 /* Prevent pathological recursion due to driver bugs */
3162 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
3165 /* cursory check for an unkickable deadlock */
3166 ctl = I915_READ_CTL(signaller);
3167 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
3170 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
3173 if (signaller->hangcheck.deadlock)
3179 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3181 struct intel_engine_cs *ring;
3184 for_each_ring(ring, dev_priv, i)
3185 ring->hangcheck.deadlock = 0;
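/*
 * Classify a busy ring whose seqno has not advanced: still ACTIVE if
 * ACTHD is moving, otherwise inspect the ring CTL to decide whether a
 * stuck wait or semaphore can be kicked, or the ring is truly hung.
 */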
3188 static enum intel_ring_hangcheck_action
3189 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3191 struct drm_device *dev = ring->dev;
3192 struct drm_i915_private *dev_priv = dev->dev_private;
3195 if (ring->hangcheck.acthd != acthd)
3196 return HANGCHECK_ACTIVE;
3199 return HANGCHECK_HUNG;
3201 /* Is the chip hanging on a WAIT_FOR_EVENT?
3202 * If so we can simply poke the RB_WAIT bit
3203 * and break the hang. This should work on
3204 * all but the second generation chipsets.
3206 tmp = I915_READ_CTL(ring);
3207 if (tmp & RING_WAIT) {
3208 i915_handle_error(dev, false,
3209 "Kicking stuck wait on %s",
3211 I915_WRITE_CTL(ring, tmp);
3212 return HANGCHECK_KICK;
3215 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3216 switch (semaphore_passed(ring)) {
3218 return HANGCHECK_HUNG;
3220 i915_handle_error(dev, false,
3221 "Kicking stuck semaphore on %s",
3223 I915_WRITE_CTL(ring, tmp);
3224 return HANGCHECK_KICK;
3226 return HANGCHECK_WAIT;
3230 return HANGCHECK_HUNG;
3234 * This is called when the chip hasn't reported back with completed
3235 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
3236 * if there is no progress, the hangcheck score for that ring is increased.
3237 * Further, acthd is inspected to see if the ring is stuck. If it is,
3238 * we kick the ring. If we see no progress on three subsequent calls,
3239 * we assume the chip is wedged and try to fix it by resetting the chip.
3241 static void i915_hangcheck_elapsed(unsigned long data)
3243 struct drm_device *dev = (struct drm_device *)data;
3244 struct drm_i915_private *dev_priv = dev->dev_private;
3245 struct intel_engine_cs *ring;
3247 int busy_count = 0, rings_hung = 0;
3248 bool stuck[I915_NUM_RINGS] = { 0 };
3253 if (!i915.enable_hangcheck)
3256 for_each_ring(ring, dev_priv, i) {
3261 semaphore_clear_deadlocks(dev_priv);
3263 seqno = ring->get_seqno(ring, false);
3264 acthd = intel_ring_get_active_head(ring);
3266 if (ring->hangcheck.seqno == seqno) {
3267 if (ring_idle(ring, seqno)) {
3268 ring->hangcheck.action = HANGCHECK_IDLE;
3270 if (waitqueue_active(&ring->irq_queue)) {
3271 /* Issue a wake-up to catch stuck h/w. */
3272 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3273 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3274 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3277 DRM_INFO("Fake missed irq on %s\n",
3279 wake_up_all(&ring->irq_queue);
3281 /* Safeguard against driver failure */
3282 ring->hangcheck.score += BUSY;
3286 /* We always increment the hangcheck score
3287 * if the ring is busy and still processing
3288 * the same request, so that no single request
3289 * can run indefinitely (such as a chain of
3290 * batches). The only time we do not increment
3291 * the hangcheck score on this ring is when this
3292 * ring is in a legitimate wait for another
3293 * ring. In that case the waiting ring is a
3294 * victim and we want to be sure we catch the
3295 * right culprit. Then every time we do kick
3296 * the ring, add a small increment to the
3297 * score so that we can catch a batch that is
3298 * being repeatedly kicked and is thus responsible
3299 * for stalling the machine.
3301 ring->hangcheck.action = ring_stuck(ring,
3304 switch (ring->hangcheck.action) {
3305 case HANGCHECK_IDLE:
3306 case HANGCHECK_WAIT:
3308 case HANGCHECK_ACTIVE:
3309 ring->hangcheck.score += BUSY;
3311 case HANGCHECK_KICK:
3312 ring->hangcheck.score += KICK;
3314 case HANGCHECK_HUNG:
3315 ring->hangcheck.score += HUNG;
3321 ring->hangcheck.action = HANGCHECK_ACTIVE;
3323 /* Gradually reduce the count so that we catch DoS
3324 * attempts across multiple batches.
3326 if (ring->hangcheck.score > 0)
3327 ring->hangcheck.score--;
3330 ring->hangcheck.seqno = seqno;
3331 ring->hangcheck.acthd = acthd;
3335 for_each_ring(ring, dev_priv, i) {
3336 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3337 DRM_INFO("%s on %s\n",
3338 stuck[i] ? "stuck" : "no progress",
3345 return i915_handle_error(dev, true, "Ring hung");
3348 /* Reset timer in case the chip hangs without another request being added */
3350 i915_queue_hangcheck(dev);
3353 void i915_queue_hangcheck(struct drm_device *dev)
3355 struct drm_i915_private *dev_priv = dev->dev_private;
3356 if (!i915.enable_hangcheck)
3359 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3360 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
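/*
 * Reset the south display engine interrupt registers; a NOP PCH has
 * none, and CPT/LPT additionally need SERR_INT cleared.
 */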
3363 static void ibx_irq_reset(struct drm_device *dev)
3365 struct drm_i915_private *dev_priv = dev->dev_private;
3367 if (HAS_PCH_NOP(dev))
3370 GEN5_IRQ_RESET(SDE);
3372 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3373 I915_WRITE(SERR_INT, 0xffffffff);
3377 * SDEIER is also touched by the interrupt handler to work around missed PCH
3378 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3379 * instead we unconditionally enable all PCH interrupt sources here, but then
3380 * only unmask them as needed with SDEIMR.
3382 * This function needs to be called before interrupts are enabled.
3384 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3386 struct drm_i915_private *dev_priv = dev->dev_private;
3388 if (HAS_PCH_NOP(dev))
3391 WARN_ON(I915_READ(SDEIER) != 0);
3392 I915_WRITE(SDEIER, 0xffffffff);
3393 POSTING_READ(SDEIER);
3396 static void gen5_gt_irq_reset(struct drm_device *dev)
3398 struct drm_i915_private *dev_priv = dev->dev_private;
3401 if (INTEL_INFO(dev)->gen >= 6)
3402 GEN5_IRQ_RESET(GEN6_PM);
3407 static void ironlake_irq_reset(struct drm_device *dev)
3409 struct drm_i915_private *dev_priv = dev->dev_private;
3411 I915_WRITE(HWSTAM, 0xffffffff);
3415 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3417 gen5_gt_irq_reset(dev);
3422 static void valleyview_irq_preinstall(struct drm_device *dev)
3424 struct drm_i915_private *dev_priv = dev->dev_private;
3428 I915_WRITE(VLV_IMR, 0);
3429 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3430 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3431 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3434 I915_WRITE(GTIIR, I915_READ(GTIIR));
3435 I915_WRITE(GTIIR, I915_READ(GTIIR));
3437 gen5_gt_irq_reset(dev);
3439 I915_WRITE(DPINVGTT, 0xff);
3441 I915_WRITE(PORT_HOTPLUG_EN, 0);
3442 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3444 I915_WRITE(PIPESTAT(pipe), 0xffff);
3445 I915_WRITE(VLV_IIR, 0xffffffff);
3446 I915_WRITE(VLV_IMR, 0xffffffff);
3447 I915_WRITE(VLV_IER, 0x0);
3448 POSTING_READ(VLV_IER);
3451 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3453 GEN8_IRQ_RESET_NDX(GT, 0);
3454 GEN8_IRQ_RESET_NDX(GT, 1);
3455 GEN8_IRQ_RESET_NDX(GT, 2);
3456 GEN8_IRQ_RESET_NDX(GT, 3);
3459 static void gen8_irq_reset(struct drm_device *dev)
3461 struct drm_i915_private *dev_priv = dev->dev_private;
3464 I915_WRITE(GEN8_MASTER_IRQ, 0);
3465 POSTING_READ(GEN8_MASTER_IRQ);
3467 gen8_gt_irq_reset(dev_priv);
3470 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3472 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3473 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3474 GEN5_IRQ_RESET(GEN8_PCU_);
3479 static void cherryview_irq_preinstall(struct drm_device *dev)
3481 struct drm_i915_private *dev_priv = dev->dev_private;
3484 I915_WRITE(GEN8_MASTER_IRQ, 0);
3485 POSTING_READ(GEN8_MASTER_IRQ);
3487 gen8_gt_irq_reset(dev_priv);
3489 GEN5_IRQ_RESET(GEN8_PCU_);
3491 POSTING_READ(GEN8_PCU_IIR);
3493 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3495 I915_WRITE(PORT_HOTPLUG_EN, 0);
3496 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3499 I915_WRITE(PIPESTAT(pipe), 0xffff);
3501 I915_WRITE(VLV_IMR, 0xffffffff);
3502 I915_WRITE(VLV_IER, 0x0);
3503 I915_WRITE(VLV_IIR, 0xffffffff);
3504 POSTING_READ(VLV_IIR);
3507 static void ibx_hpd_irq_setup(struct drm_device *dev)
3509 struct drm_i915_private *dev_priv = dev->dev_private;
3510 struct drm_mode_config *mode_config = &dev->mode_config;
3511 struct intel_encoder *intel_encoder;
3512 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3514 if (HAS_PCH_IBX(dev)) {
3515 hotplug_irqs = SDE_HOTPLUG_MASK;
3516 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3517 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3518 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3520 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3521 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3522 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3523 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3526 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3529 * Enable digital hotplug on the PCH, and configure the DP short pulse
3530 * duration to 2ms (which is the minimum in the Display Port spec).
3532 * This register is the same on all known PCH chips.
3534 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3535 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3536 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3537 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3538 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3539 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3542 static void ibx_irq_postinstall(struct drm_device *dev)
3544 struct drm_i915_private *dev_priv = dev->dev_private;
3547 if (HAS_PCH_NOP(dev))
3550 if (HAS_PCH_IBX(dev))
3551 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3553 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3555 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3556 I915_WRITE(SDEIMR, ~mask);
3559 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3561 struct drm_i915_private *dev_priv = dev->dev_private;
3562 u32 pm_irqs, gt_irqs;
3564 pm_irqs = gt_irqs = 0;
3566 dev_priv->gt_irq_mask = ~0;
3567 if (HAS_L3_DPF(dev)) {
3568 /* L3 parity interrupt is always unmasked. */
3569 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3570 gt_irqs |= GT_PARITY_ERROR(dev);
3573 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3575 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3576 ILK_BSD_USER_INTERRUPT;
3578 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3581 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3583 if (INTEL_INFO(dev)->gen >= 6) {
3584 pm_irqs |= dev_priv->pm_rps_events;
3587 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3589 dev_priv->pm_irq_mask = 0xffffffff;
3590 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3594 static int ironlake_irq_postinstall(struct drm_device *dev)
3596 unsigned long irqflags;
3597 struct drm_i915_private *dev_priv = dev->dev_private;
3598 u32 display_mask, extra_mask;
3600 if (INTEL_INFO(dev)->gen >= 7) {
3601 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3602 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3603 DE_PLANEB_FLIP_DONE_IVB |
3604 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3605 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3606 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3608 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3609 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3611 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3613 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3614 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3617 dev_priv->irq_mask = ~display_mask;
3619 I915_WRITE(HWSTAM, 0xeffe);
3621 ibx_irq_pre_postinstall(dev);
3623 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3625 gen5_gt_irq_postinstall(dev);
3627 ibx_irq_postinstall(dev);
3629 if (IS_IRONLAKE_M(dev)) {
3630 /* Enable PCU event interrupts
3632 * spinlocking not required here for correctness since interrupt
3633 * setup is guaranteed to run in single-threaded context. But we
3634 * need it to make the assert_spin_locked happy. */
3635 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3636 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3637 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3643 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3648 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3649 PIPE_FIFO_UNDERRUN_STATUS;
3651 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3652 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3653 POSTING_READ(PIPESTAT(PIPE_A));
3655 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3656 PIPE_CRC_DONE_INTERRUPT_STATUS;
3658 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3659 PIPE_GMBUS_INTERRUPT_STATUS);
3660 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3662 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3663 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3664 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3665 dev_priv->irq_mask &= ~iir_mask;
3667 I915_WRITE(VLV_IIR, iir_mask);
3668 I915_WRITE(VLV_IIR, iir_mask);
3669 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3670 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3671 POSTING_READ(VLV_IER);
3674 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3679 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3680 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3681 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3683 dev_priv->irq_mask |= iir_mask;
3684 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3685 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3686 I915_WRITE(VLV_IIR, iir_mask);
3687 I915_WRITE(VLV_IIR, iir_mask);
3688 POSTING_READ(VLV_IIR);
3690 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3691 PIPE_CRC_DONE_INTERRUPT_STATUS;
3693 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3694 PIPE_GMBUS_INTERRUPT_STATUS);
3695 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3697 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3698 PIPE_FIFO_UNDERRUN_STATUS;
3699 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3700 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3701 POSTING_READ(PIPESTAT(PIPE_A));
3704 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3706 assert_spin_locked(&dev_priv->irq_lock);
3708 if (dev_priv->display_irqs_enabled)
3711 dev_priv->display_irqs_enabled = true;
3713 if (dev_priv->dev->irq_enabled)
3714 valleyview_display_irqs_install(dev_priv);
3717 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3719 assert_spin_locked(&dev_priv->irq_lock);
3721 if (!dev_priv->display_irqs_enabled)
3724 dev_priv->display_irqs_enabled = false;
3726 if (dev_priv->dev->irq_enabled)
3727 valleyview_display_irqs_uninstall(dev_priv);
3730 static int valleyview_irq_postinstall(struct drm_device *dev)
3732 struct drm_i915_private *dev_priv = dev->dev_private;
3733 unsigned long irqflags;
3735 dev_priv->irq_mask = ~0;
3737 I915_WRITE(PORT_HOTPLUG_EN, 0);
3738 POSTING_READ(PORT_HOTPLUG_EN);
3740 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3741 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3742 I915_WRITE(VLV_IIR, 0xffffffff);
3743 POSTING_READ(VLV_IER);
3745 /* Interrupt setup is already guaranteed to be single-threaded; this is
3746 * just to make the assert_spin_locked check happy. */
3747 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3748 if (dev_priv->display_irqs_enabled)
3749 valleyview_display_irqs_install(dev_priv);
3750 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3752 I915_WRITE(VLV_IIR, 0xffffffff);
3753 I915_WRITE(VLV_IIR, 0xffffffff);
3755 gen5_gt_irq_postinstall(dev);
3757 /* ack & enable invalid PTE error interrupts */
3758 #if 0 /* FIXME: add support to irq handler for checking these bits */
3759 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3760 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3763 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
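/*
 * Program the four gen8 GT interrupt banks with the user interrupt
 * (and render L3 parity) bits each bank should deliver.
 */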
3768 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3772 /* These are interrupts we'll toggle with the ring mask register */
3773 uint32_t gt_interrupts[] = {
3774 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3775 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3776 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3777 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3778 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3780 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3783 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
3784 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
3786 dev_priv->pm_irq_mask = 0xffffffff;
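/*
 * Set up the gen8 display engine interrupts: flip-done, CRC-done and
 * fault errors are always unmasked per pipe, while vblank and FIFO
 * underrun are only enabled in IER and toggled later via de_irq_mask;
 * AUX channel A is unmasked on the DE port.
 */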
3789 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3791 struct drm_device *dev = dev_priv->dev;
3792 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3793 GEN8_PIPE_CDCLK_CRC_DONE |
3794 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3795 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3796 GEN8_PIPE_FIFO_UNDERRUN;
3798 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3799 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3800 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3803 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
3806 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3809 static int gen8_irq_postinstall(struct drm_device *dev)
3811 struct drm_i915_private *dev_priv = dev->dev_private;
3813 ibx_irq_pre_postinstall(dev);
3815 gen8_gt_irq_postinstall(dev_priv);
3816 gen8_de_irq_postinstall(dev_priv);
3818 ibx_irq_postinstall(dev);
3820 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3821 POSTING_READ(GEN8_MASTER_IRQ);
3826 static int cherryview_irq_postinstall(struct drm_device *dev)
3828 struct drm_i915_private *dev_priv = dev->dev_private;
3829 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3830 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3831 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3832 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3833 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3834 PIPE_CRC_DONE_INTERRUPT_STATUS;
3835 unsigned long irqflags;
3839 * Leave vblank interrupts masked initially. enable/disable will
3840 * toggle them based on usage.
3842 dev_priv->irq_mask = ~enable_mask;
3845 I915_WRITE(PIPESTAT(pipe), 0xffff);
3847 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3848 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3850 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3851 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3853 I915_WRITE(VLV_IIR, 0xffffffff);
3854 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3855 I915_WRITE(VLV_IER, enable_mask);
3857 gen8_gt_irq_postinstall(dev_priv);
3859 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3860 POSTING_READ(GEN8_MASTER_IRQ);
3865 static void gen8_irq_uninstall(struct drm_device *dev)
3867 struct drm_i915_private *dev_priv = dev->dev_private;
3872 intel_hpd_irq_uninstall(dev_priv);
3874 gen8_irq_reset(dev);
3877 static void valleyview_irq_uninstall(struct drm_device *dev)
3879 struct drm_i915_private *dev_priv = dev->dev_private;
3880 unsigned long irqflags;
3886 I915_WRITE(VLV_MASTER_IER, 0);
3888 intel_hpd_irq_uninstall(dev_priv);
3891 I915_WRITE(PIPESTAT(pipe), 0xffff);
3893 I915_WRITE(HWSTAM, 0xffffffff);
3894 I915_WRITE(PORT_HOTPLUG_EN, 0);
3895 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3897 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3898 if (dev_priv->display_irqs_enabled)
3899 valleyview_display_irqs_uninstall(dev_priv);
3900 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3902 dev_priv->irq_mask = 0;
3904 I915_WRITE(VLV_IIR, 0xffffffff);
3905 I915_WRITE(VLV_IMR, 0xffffffff);
3906 I915_WRITE(VLV_IER, 0x0);
3907 POSTING_READ(VLV_IER);
3910 static void cherryview_irq_uninstall(struct drm_device *dev)
3912 struct drm_i915_private *dev_priv = dev->dev_private;
3918 I915_WRITE(GEN8_MASTER_IRQ, 0);
3919 POSTING_READ(GEN8_MASTER_IRQ);
3921 #define GEN8_IRQ_FINI_NDX(type, which) \
3923 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3924 I915_WRITE(GEN8_##type##_IER(which), 0); \
3925 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3926 POSTING_READ(GEN8_##type##_IIR(which)); \
3927 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3930 #define GEN8_IRQ_FINI(type) \
3932 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3933 I915_WRITE(GEN8_##type##_IER, 0); \
3934 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3935 POSTING_READ(GEN8_##type##_IIR); \
3936 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3939 GEN8_IRQ_FINI_NDX(GT, 0);
3940 GEN8_IRQ_FINI_NDX(GT, 1);
3941 GEN8_IRQ_FINI_NDX(GT, 2);
3942 GEN8_IRQ_FINI_NDX(GT, 3);
3946 #undef GEN8_IRQ_FINI
3947 #undef GEN8_IRQ_FINI_NDX
3949 I915_WRITE(PORT_HOTPLUG_EN, 0);
3950 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3953 I915_WRITE(PIPESTAT(pipe), 0xffff);
3955 I915_WRITE(VLV_IMR, 0xffffffff);
3956 I915_WRITE(VLV_IER, 0x0);
3957 I915_WRITE(VLV_IIR, 0xffffffff);
3958 POSTING_READ(VLV_IIR);
3961 static void ironlake_irq_uninstall(struct drm_device *dev)
3963 struct drm_i915_private *dev_priv = dev->dev_private;
3968 intel_hpd_irq_uninstall(dev_priv);
3970 ironlake_irq_reset(dev);
3973 static void i8xx_irq_preinstall(struct drm_device *dev)
3975 struct drm_i915_private *dev_priv = dev->dev_private;
3979 I915_WRITE(PIPESTAT(pipe), 0);
3980 I915_WRITE16(IMR, 0xffff);
3981 I915_WRITE16(IER, 0x0);
3982 POSTING_READ16(IER);
3985 static int i8xx_irq_postinstall(struct drm_device *dev)
3987 struct drm_i915_private *dev_priv = dev->dev_private;
3988 unsigned long irqflags;
3991 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3993 /* Unmask the interrupts that we always want on. */
3994 dev_priv->irq_mask =
3995 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3996 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3997 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3998 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3999 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4000 I915_WRITE16(IMR, dev_priv->irq_mask);
4003 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4004 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4005 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4006 I915_USER_INTERRUPT);
4007 POSTING_READ16(IER);
4009 /* Interrupt setup is already guaranteed to be single-threaded; this is
4010 * just to make the assert_spin_locked check happy. */
4011 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4012 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4013 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4014 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4020 * Returns true when a page flip has completed.
4022 static bool i8xx_handle_vblank(struct drm_device *dev,
4023 int plane, int pipe, u32 iir)
4025 struct drm_i915_private *dev_priv = dev->dev_private;
4026 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4028 if (!intel_pipe_handle_vblank(dev, pipe))
4031 if ((iir & flip_pending) == 0)
4034 intel_prepare_page_flip(dev, plane);
4036 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4037 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4038 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4039 * the flip is completed (no longer pending). Since this doesn't raise
4040 * an interrupt per se, we watch for the change at vblank.
4042 if (I915_READ16(ISR) & flip_pending)
4045 intel_finish_page_flip(dev, pipe);
4050 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4052 struct drm_device *dev = arg;
4053 struct drm_i915_private *dev_priv = dev->dev_private;
4056 unsigned long irqflags;
4059 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4060 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4062 iir = I915_READ16(IIR);
4066 while (iir & ~flip_mask) {
4067 /* Can't rely on pipestat interrupt bit in iir as it might
4068 * have been cleared after the pipestat interrupt was received.
4069 * It doesn't set the bit in iir again, but it still produces
4070 * interrupts (for non-MSI).
4072 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4073 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4074 i915_handle_error(dev, false,
4075 "Command parser error, iir 0x%08x",
4078 for_each_pipe(pipe) {
4079 int reg = PIPESTAT(pipe);
4080 pipe_stats[pipe] = I915_READ(reg);
4083 * Clear the PIPE*STAT regs before the IIR
4085 if (pipe_stats[pipe] & 0x8000ffff)
4086 I915_WRITE(reg, pipe_stats[pipe]);
4088 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4090 I915_WRITE16(IIR, iir & ~flip_mask);
4091 new_iir = I915_READ16(IIR); /* Flush posted writes */
4093 i915_update_dri1_breadcrumb(dev);
4095 if (iir & I915_USER_INTERRUPT)
4096 notify_ring(dev, &dev_priv->ring[RCS]);
4098 for_each_pipe(pipe) {
4103 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4104 i8xx_handle_vblank(dev, plane, pipe, iir))
4105 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4107 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4108 i9xx_pipe_crc_irq_handler(dev, pipe);
4110 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4111 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4112 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4121 static void i8xx_irq_uninstall(struct drm_device *dev)
4123 struct drm_i915_private *dev_priv = dev->dev_private;
4126 for_each_pipe(pipe) {
4127 /* Clear enable bits; then clear status bits */
4128 I915_WRITE(PIPESTAT(pipe), 0);
4129 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4131 I915_WRITE16(IMR, 0xffff);
4132 I915_WRITE16(IER, 0x0);
4133 I915_WRITE16(IIR, I915_READ16(IIR));
4136 static void i915_irq_preinstall(struct drm_device *dev)
4138 struct drm_i915_private *dev_priv = dev->dev_private;
4141 if (I915_HAS_HOTPLUG(dev)) {
4142 I915_WRITE(PORT_HOTPLUG_EN, 0);
4143 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4146 I915_WRITE16(HWSTAM, 0xeffe);
4148 I915_WRITE(PIPESTAT(pipe), 0);
4149 I915_WRITE(IMR, 0xffffffff);
4150 I915_WRITE(IER, 0x0);
4154 static int i915_irq_postinstall(struct drm_device *dev)
4156 struct drm_i915_private *dev_priv = dev->dev_private;
4158 unsigned long irqflags;
4160 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4162 /* Unmask the interrupts that we always want on. */
4163 dev_priv->irq_mask =
4164 ~(I915_ASLE_INTERRUPT |
4165 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4166 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4167 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4168 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4169 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4172 I915_ASLE_INTERRUPT |
4173 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4174 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4175 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4176 I915_USER_INTERRUPT;
4178 if (I915_HAS_HOTPLUG(dev)) {
4179 I915_WRITE(PORT_HOTPLUG_EN, 0);
4180 POSTING_READ(PORT_HOTPLUG_EN);
4182 /* Enable in IER... */
4183 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4184 /* and unmask in IMR */
4185 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4188 I915_WRITE(IMR, dev_priv->irq_mask);
4189 I915_WRITE(IER, enable_mask);
4192 i915_enable_asle_pipestat(dev);
4194 /* Interrupt setup is already guaranteed to be single-threaded; this is
4195 * just to make the assert_spin_locked check happy. */
4196 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4197 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4198 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4199 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
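/*
 * The FlipDone detection above exploits the fact that ISR always
 * reflects the live state of an event while IIR latches it: right
 * after MI_DISPLAY_FLIP both read as pending, and once the flip
 * completes only the latched IIR copy remains set.
 */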
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
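/*
 * Uninstall mirrors preinstall: quiesce hotplug, mask everything, then
 * clear whatever status is still latched so a later install starts
 * from a clean slate.
 */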
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	intel_hpd_irq_uninstall(dev_priv);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
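/*
 * Reprogram PORT_HOTPLUG_EN from the per-pin hpd_stats state.  Both
 * intel_hpd_init() and the storm re-enable timer funnel through this
 * hook, always with dev_priv->irq_lock held (see the assert below).
 */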
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	intel_hpd_irq_uninstall(dev_priv);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
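/*
 * Timer callback, scheduled when an interrupt storm has forced a pin
 * into HPD_DISABLED: flip the pin back to HPD_ENABLED, restore the
 * affected connectors from polling to HPD and reprogram the hardware.
 */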
static void intel_hpd_irq_reenable(unsigned long data)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
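/*
 * One-time init: pick the per-platform IRQ vtable, vblank counter
 * implementation and hotplug setup hook before drm installs the
 * interrupt handler.
 */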
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
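/*
 * Reset hotplug state: mark every pin enabled, put each connector back
 * on its preferred polling mode and reprogram the hotplug registers
 * via the platform hpd_irq_setup hook.  Called on init and resume.
 */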
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
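/*
 * The pm.irqs_disabled flag managed by the two helpers below is
 * consulted by the driver's low-level interrupt enable/disable paths,
 * so that nothing touches the interrupt registers while the device is
 * runtime suspended.
 */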
/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm.irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm.irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}