// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"

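/* Forward GuC-to-host notifications on to the GuC event handler. */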
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
        if (iir & GUC_INTR_GUC2HOST)
                intel_guc_to_host_event_handler(guc);
}

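/*
 * Select one pending bit in a GT interrupt bank and spin briefly until the
 * shared identity register reports a valid class/instance/payload for it.
 */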
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
                         const unsigned int bank, const unsigned int bit)
{
        void __iomem * const regs = gt->uncore->regs;
        u32 timeout_ts, ident;

        lockdep_assert_held(&gt->irq_lock);

        raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

        /*
         * NB: Specs do not specify how long to spin wait,
         * so we do ~100us as an educated guess.
         */
        timeout_ts = (local_clock() >> 10) + 100;
        do {
                ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
        } while (!(ident & GEN11_INTR_DATA_VALID) &&
                 !time_after32(local_clock() >> 10, timeout_ts));

        if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
                DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
                          bank, bit, ident);
                return 0;
        }

        raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
                      GEN11_INTR_DATA_VALID);
        return ident;
}

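/* Non-engine ("other" class) interrupts: the GT PM and GuC instances. */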
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
                        const u16 iir)
{
        if (instance == OTHER_GUC_INSTANCE)
                return guc_irq_handler(&gt->uc.guc, iir);

        if (instance == OTHER_GTPM_INSTANCE)
                return gen11_rps_irq_handler(&gt->rps, iir);

        WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
                  instance, iir);
}

static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
                         const u8 instance, const u16 iir)
{
        struct intel_engine_cs *engine = NULL;

        if (instance <= MAX_ENGINE_INSTANCE)
                engine = gt->engine_class[class][instance];

        if (likely(engine))
                return intel_engine_cs_irq(engine, iir);

        WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
                  class, instance);
}

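/* Decode an identity dword and route it to the engine or "other" handler. */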
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
        const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
        const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
        const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

        if (unlikely(!intr))
                return;

        if (class <= COPY_ENGINE_CLASS)
                return gen11_engine_irq_handler(gt, class, instance, intr);

        if (class == OTHER_CLASS)
                return gen11_other_irq_handler(gt, instance, intr);

        WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
                  class, instance, intr);
}

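/*
 * Service every pending bit in one GT interrupt bank, then clear the bank's
 * interrupt dword once the shared identity registers have been consumed.
 */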
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
        void __iomem * const regs = gt->uncore->regs;
        unsigned long intr_dw;
        unsigned int bit;

        lockdep_assert_held(&gt->irq_lock);

        intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

        for_each_set_bit(bit, &intr_dw, 32) {
                const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

                gen11_gt_identity_handler(gt, ident);
        }

        /* Clear must be after shared has been served for engine */
        raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

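/* Top-level gen11+ GT interrupt handler: walk both banks under the irq lock. */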
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
        unsigned int bank;

        spin_lock(&gt->irq_lock);

        for (bank = 0; bank < 2; bank++) {
                if (master_ctl & GEN11_GT_DW_IRQ(bank))
                        gen11_gt_bank_handler(gt, bank);
        }

        spin_unlock(&gt->irq_lock);
}

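/*
 * Clear a single stuck IIR bit; the selector/shared identity registers must
 * be serviced before the GT interrupt dword bit can be released.
 */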
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
                            const unsigned int bank, const unsigned int bit)
{
        void __iomem * const regs = gt->uncore->regs;
        u32 dw;

        lockdep_assert_held(&gt->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(gt, bank, bit);
                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody.
                 */
                raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
                return true;
        }

        return false;
}

void gen11_gt_irq_reset(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;

        /* Disable RCS, BCS, VCS and VECS class engines. */
        intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);

        /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
        intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);

        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        u32 irqs = GT_RENDER_USER_INTERRUPT;
        u32 dmask;
        u32 smask;

        if (!intel_uc_wants_guc_submission(&gt->uc))
                irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
                        GT_CONTEXT_SWITCH_INTERRUPT |
                        GT_WAIT_SEMAPHORE_INTERRUPT;

        dmask = irqs << 16 | irqs;
        smask = irqs << 16;

        BUILD_BUG_ON(irqs & 0xffff0000);

        /* Enable RCS, BCS, VCS and VECS class interrupts. */
        intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
        intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

        /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
        intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
        intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
        intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
        intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
        intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

        /*
         * RPS interrupts will get enabled/disabled on demand when RPS itself
         * is enabled/disabled.
         */
        gt->pm_ier = 0x0;
        gt->pm_imr = ~gt->pm_ier;
        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

        /* Same thing for GuC interrupts */
        intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
                intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
                                    gt_iir);

        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
                                    gt_iir);
}

static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
        if (!HAS_L3_DPF(gt->i915))
                return;

        spin_lock(&gt->irq_lock);
        gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
        spin_unlock(&gt->irq_lock);

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                gt->i915->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                gt->i915->l3_parity.which_slice |= 1 << 0;

        schedule_work(&gt->i915->l3_parity.error_work);
}

void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
                intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
                                    gt_iir);

        if (gt_iir & GT_BSD_USER_INTERRUPT)
                intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
                                    gt_iir);

        if (gt_iir & GT_BLT_USER_INTERRUPT)
                intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
                                    gt_iir);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

        if (gt_iir & GT_PARITY_ERROR(gt->i915))
                gen7_parity_error_irq_handler(gt, gt_iir);
}

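/*
 * gen8-gen10: each GT IIR bank packs interrupt bits for up to two sources at
 * different shifts, so read once, fan out per engine, then ack the whole dword.
 */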
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
        void __iomem * const regs = gt->uncore->regs;
        u32 iir;

        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(0));
                if (likely(iir)) {
                        intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
                                            iir >> GEN8_RCS_IRQ_SHIFT);
                        intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
                                            iir >> GEN8_BCS_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(0), iir);
                }
        }

        if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(1));
                if (likely(iir)) {
                        intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
                                            iir >> GEN8_VCS0_IRQ_SHIFT);
                        intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
                                            iir >> GEN8_VCS1_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(1), iir);
                }
        }

        if (master_ctl & GEN8_GT_VECS_IRQ) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(3));
                if (likely(iir)) {
                        intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
                                            iir >> GEN8_VECS_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(3), iir);
                }
        }

        if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(2));
                if (likely(iir)) {
                        gen6_rps_irq_handler(&gt->rps, iir);
                        guc_irq_handler(&gt->uc.guc, iir >> 16);
                        raw_reg_write(regs, GEN8_GT_IIR(2), iir);
                }
        }
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;

        GEN8_IRQ_RESET_NDX(uncore, GT, 0);
        GEN8_IRQ_RESET_NDX(uncore, GT, 1);
        GEN8_IRQ_RESET_NDX(uncore, GT, 2);
        GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
        /* These are interrupts we'll toggle with the ring mask register */
        const u32 irqs =
                GT_CS_MASTER_ERROR_INTERRUPT |
                GT_RENDER_USER_INTERRUPT |
                GT_CONTEXT_SWITCH_INTERRUPT |
                GT_WAIT_SEMAPHORE_INTERRUPT;
        const u32 gt_interrupts[] = {
                irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
                irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
                0,
                irqs << GEN8_VECS_IRQ_SHIFT,
        };
        struct intel_uncore *uncore = gt->uncore;

        gt->pm_ier = 0x0;
        gt->pm_imr = ~gt->pm_ier;
        GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
        GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
        /*
         * RPS interrupts will get enabled/disabled on demand when RPS itself
         * is enabled/disabled. Same will be the case for GuC interrupts.
         */
        GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
        GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

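/*
 * Adjust GTIMR for the bits in @interrupt_mask: only those also set in
 * @enabled_irq_mask are left unmasked.
 */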
static void gen5_gt_update_irq(struct intel_gt *gt,
                               u32 interrupt_mask,
                               u32 enabled_irq_mask)
{
        lockdep_assert_held(&gt->irq_lock);

        GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

        gt->gt_imr &= ~interrupt_mask;
        gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
        intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
        gen5_gt_update_irq(gt, mask, mask);
        intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
        gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;

        GEN3_IRQ_RESET(uncore, GT);
        if (GRAPHICS_VER(gt->i915) >= 6)
                GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        u32 pm_irqs = 0;
        u32 gt_irqs = 0;

        gt->gt_imr = ~0;
        if (HAS_L3_DPF(gt->i915)) {
                /* L3 parity interrupt is always unmasked. */
                gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
                gt_irqs |= GT_PARITY_ERROR(gt->i915);
        }

        gt_irqs |= GT_RENDER_USER_INTERRUPT;
        if (GRAPHICS_VER(gt->i915) == 5)
                gt_irqs |= ILK_BSD_USER_INTERRUPT;
        else
                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

        GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

        if (GRAPHICS_VER(gt->i915) >= 6) {
                /*
                 * RPS interrupts will get enabled/disabled on demand when RPS
                 * itself is enabled/disabled.
                 */
                if (HAS_ENGINE(gt, VECS0)) {
                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
                        gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
                }

                gt->pm_imr = 0xffffffff;
                GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
        }
}