// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 */

/* File to be included by other .c files */

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
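
/*
 * Illustrative sketch (an addition for readability, not part of the
 * original template): the two-level paste lets the including .c file
 * pick the generated symbol names by defining X_PFX before including
 * this file.  XGLUE() exists so that X_PFX is macro-expanded *before*
 * ## joins the tokens.  With a hypothetical prefix:
 */
#if 0	/* example only, never compiled */
#define X_PFX my_pfx_
static void GLUE(X_PFX, example_fn)(void) { }
/* ...which expands to: static void my_pfx_example_fn(void) { } */
#endif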

/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */

static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
	/*
	 * Ensure any previous store to CPPR is ordered vs.
	 * the subsequent loads from PIPR or ACK.
	 */

	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */

	/* XXX Check grouping level */

	if (!((ack >> 8) & TM_QW1_NSR_EO))

	/* Grab CPPR of the most favored pending interrupt */
		xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
	/* Check consistency */
	if (cppr >= xc->hw_cppr)
		pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
			smp_processor_id(), cppr, xc->hw_cppr);
#endif

	/*
	 * Update our image of the HW CPPR. We don't yet modify
	 * xc->cppr, this will be done as we scan for interrupts
	 * in the queues.
	 */
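
	/*
	 * Background note on the ACK cycle performed above (a reading of
	 * the TIMA ACK format added for clarity, not text from the
	 * original file): the 16-bit load from TM_SPC_ACK_OS_REG returns
	 * the NSR byte in bits 15..8 and the acknowledged CPPR in bits
	 * 7..0, so e.g. a value of 0x8005 means "OS notification pending"
	 * (TM_QW1_NSR_EO set) at priority 5.
	 */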

static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
	if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		offset |= XIVE_ESB_LD_ST_MO;

	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__

static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
	else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		/*
		 * For LSIs the HW EOI cycle is used rather than PQ bits,
		 * as they are automatically re-triggered in HW when still
		 * pending.
		 */
		__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 */
		eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

		/* Re-trigger if needed */
		if ((eoi_val & 1) && __x_trig_page(xd))
			__x_writeq(0, __x_trig_page(xd));
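
		/*
		 * Illustrative note added for clarity (generic XIVE ESB
		 * semantics, stated here as an assumption): ESB loads
		 * return the previous PQ state in the low two bits, P in
		 * bit 1 and Q in bit 0.  P set means an occurrence was
		 * presented and awaits EOI; Q set means a further trigger
		 * arrived meanwhile and was coalesced.  That is why, after
		 * the PQ=00 load above, the interrupt is re-triggered
		 * through the trigger page when the returned Q bit
		 * (eoi_val & 1) is set.
		 */
#if 0		/* sketch only, never compiled: decoding an ESB load */
		u8 pq = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_GET);
		bool p = pq & 2;	/* presented, awaiting EOI    */
		bool q = pq & 1;	/* extra trigger(s) coalesced */
#endif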

static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
				       u8 pending, int scan_type)
	/* Find highest pending priority */
	while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
		/*
		 * If pending is 0 this will return 0xff which is what
		 * we want.
		 */
		prio = ffs(pending) - 1;
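		/*
		 * Worked example (illustrative): with priorities 3 and 5
		 * pending, pending == 0b00101000, ffs() returns 4 and prio
		 * becomes 3, i.e. the most favored (lowest numbered)
		 * pending priority.  With pending == 0, ffs() returns 0
		 * and prio wraps to 0xff (assuming prio is an unsigned
		 * byte, as the preceding comment implies).
		 */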

		/* Don't scan past the guest cppr */
		if (prio >= xc->cppr || prio > 7) {
			if (xc->mfrr < xc->cppr) {

		/* Grab queue and pointers */
		q = &xc->queues[prio];

		/*
		 * Snapshot the queue page. The test further down for EOI
		 * must use the same "copy" that was used by __xive_read_eq
		 * since qpage can be set concurrently and we don't want
		 * to miss an EOI.
		 */
		qpage = READ_ONCE(q->qpage);

		/*
		 * Try to fetch from the queue. Will return 0 for a
		 * non-queueing priority (i.e., qpage == NULL).
		 */
		hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

		/*
		 * If this was a signal for an MFRR change done by
		 * H_IPI we skip it. Additionally, if we were fetching,
		 * we EOI it now, thus re-enabling reception of a new
		 * such signal.
		 *
		 * We also need to do that if prio is 0 and we had no
		 * page for the queue. In this case, we have a non-queued
		 * IPI that needs to be EOId.
		 *
		 * This is safe because if we have another pending MFRR
		 * change that wasn't observed above, the Q bit will have
		 * been set and another occurrence of the IPI will trigger.
		 */
		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
			if (scan_type == scan_fetch) {
				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
						       &xc->vp_ipi_data);

			/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
			WARN_ON(hirq && hirq != XICS_IPI);
#endif

		/* If it's the dummy interrupt, continue searching */
		if (hirq == XICS_DUMMY)

		/* Clear the pending bit if the queue is now empty */
			pending &= ~(1 << prio);

			/*
			 * Check if the queue count needs adjusting due to
			 * interrupts being moved away.
			 */
			if (atomic_read(&q->pending_count)) {
				int p = atomic_xchg(&q->pending_count, 0);

#ifdef XIVE_RUNTIME_CHECKS
				WARN_ON(p > atomic_read(&q->count));
#endif
				atomic_sub(p, &q->count);

		/*
		 * If the most favored prio we found pending is less
		 * favored than (or equal to) a pending IPI, we return
		 * the IPI instead.
		 */
		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {

		/* If fetching, update queue pointers */
		if (scan_type == scan_fetch) {

	/* If we are just taking a "peek", do nothing else */
	if (scan_type == scan_poll)

	/* Update the pending bits */
	xc->pending = pending;

	/*
	 * If this is an EOI, that's it; no CPPR adjustment is done here,
	 * all we needed was to clean up the stale pending bits and check
	 * if there's anything left.
	 */
	if (scan_type == scan_eoi)

	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
	 *
	 * Note: This can only make xc->cppr smaller as the previous
	 * loop will only exit with hirq != 0 if prio is lower than
	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
	 * for pending IPIs.
	 */

	/*
	 * If it was an IPI the HW CPPR might have been lowered too much
	 * as the HW interrupt we use for IPIs is routed to priority 0.
	 *
	 * We re-sync it here.
	 */
	if (xc->cppr != xc->hw_cppr) {
		xc->hw_cppr = xc->cppr;
		__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_XIRR\n");

	xc->GLUE(X_STAT_PFX,h_xirr)++;

	/* First collect pending bits from HW */
	GLUE(X_PFX,ack_pending)(xc);

	pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
		 xc->pending, xc->hw_cppr, xc->cppr);

	/* Grab previous CPPR and reverse map it */
	old_cppr = xive_prio_to_guest(xc->cppr);

	/* Scan for actual interrupts */
	hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

	pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
		 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
	/* That should never hit */
	if (hirq & 0xff000000)
		pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

	/*
	 * XXX We could check if the interrupt is masked here and
	 * filter it. If we chose to do so, we would need to do:
	 */

	/* Return interrupt and old CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
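
	/*
	 * Layout sketch (the standard XICS xirr packing, spelled out here
	 * for readability): the interrupt number occupies the low 24 bits
	 * and the previous CPPR the top byte, e.g. old_cppr == 0x05 and
	 * hirq == 0x123 yields GPR4 == 0x05000123.
	 */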

X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u8 pending = xc->pending;

	pr_devel("H_IPOLL(server=%ld)\n", server);

	xc->GLUE(X_STAT_PFX,h_ipoll)++;

	/* Grab the target VCPU if not the current one */
	if (xc->server_num != server) {
		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
		xc = vcpu->arch.xive_vcpu;

		/* Scan all priorities */

		/* Grab pending interrupt if any */
		__be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
		u8 pipr = be64_to_cpu(qw1) & 0xff;

			pending |= 1 << pipr;

	hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

	/* Return interrupt and current CPPR in GPR4 */
	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);

static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
	pending = xc->pending;
	if (xc->mfrr != 0xff) {
			pending |= 1 << xc->mfrr;

	prio = ffs(pending) - 1;

	__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
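
	/*
	 * Illustrative note added for clarity (a summary of the intended
	 * HW behaviour, not text from the original file): the byte store
	 * to TM_SPC_SET_OS_PENDING sets the corresponding bit in the VP's
	 * OS interrupt pending buffer, so the thread is (re)notified at
	 * that priority as soon as its CPPR allows it.
	 */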

static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
					       struct kvmppc_xive_vcpu *xc)
	/* For each priority that is now masked */
	for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];
		struct kvmppc_xive_irq_state *state;
		struct kvmppc_xive_src_block *sb;
		u32 idx, toggle, entry, irq, hw_num;
		struct xive_irq_data *xd;

		qpage = READ_ONCE(q->qpage);

		/* For each interrupt in the queue */
			entry = be32_to_cpup(qpage + idx);

			if ((entry >> 31) == toggle)

			irq = entry & 0x7fffffff;

			/* Skip dummies and IPIs */
			if (irq == XICS_DUMMY || irq == XICS_IPI)

			sb = kvmppc_xive_find_source(xive, irq, &src);

			state = &sb->irq_state[src];

			/* Has it been rerouted ? */
			if (xc->server_num == state->act_server)

			/*
			 * All right, it *has* been re-routed, kill it from
			 * the queue.
			 */
			qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);

			/* Find the HW interrupt */
			kvmppc_xive_select_irq(state, &hw_num, &xd);

			/* If it's not an LSI, set PQ to 11; the EOI will force a resend */
			if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
				GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);

			GLUE(X_PFX,source_eoi)(hw_num, xd);

			idx = (idx + 1) & q->msk;
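
	/*
	 * Illustrative note on the queue entry format relied on above
	 * (generic XIVE event queue layout, stated as an assumption):
	 * each 32-bit entry carries a generation bit in bit 31, flipped
	 * on every pass over the ring, and the interrupt number in bits
	 * 30..0.  An entry whose generation bit equals the reader's
	 * current toggle value has not been written in this pass, i.e.
	 * the end of the valid entries was reached, which is what the
	 * (entry >> 31) == toggle test above checks.  Overwriting the
	 * number with XICS_DUMMY while preserving bit 31 keeps the entry
	 * valid but inert.
	 */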

X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;

	pr_devel("H_CPPR(cppr=%ld)\n", cppr);

	xc->GLUE(X_STAT_PFX,h_cppr)++;

	cppr = xive_prio_from_guest(cppr);

	/* Remember old and update SW state */

	/*
	 * Order the above update of xc->cppr with the subsequent
	 * read of xc->mfrr inside push_pending_to_hw()
	 */

	if (cppr > old_cppr) {
		/*
		 * We are masking less, we need to look for pending things
		 * to deliver and set VP pending bits accordingly to trigger
		 * a new interrupt, otherwise we might miss MFRR changes for
		 * which we have optimized out sending an IPI signal.
		 */
		GLUE(X_PFX,push_pending_to_hw)(xc);

		/*
		 * We are masking more, we need to check the queue for any
		 * interrupt that has been routed to another CPU, take
		 * it out (replace it with the dummy) and retrigger it.
		 *
		 * This is necessary since those interrupts may otherwise
		 * never be processed, at least not until this CPU restores
		 * its CPPR.
		 *
		 * This is in theory racy vs. HW adding new interrupts to
		 * the queue. In practice this works because the interesting
		 * cases are when the guest has done a set_xive() to move the
		 * interrupt away, which flushes the xive, followed by the
		 * target CPU doing a H_CPPR. So any new interrupt coming into
		 * the queue must still be routed to us and isn't a source
		 * of concern.
		 */
		GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);

	__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);

	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0) {
		/*
		 * This barrier orders the setting of xc->cppr vs.
		 * subsequent test of xc->mfrr done inside
		 * scan_interrupts and push_pending_to_hw
		 */

	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
		pr_devel(" source not found !\n");

	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;

	/*
	 * This barrier orders both setting of in_eoi above vs.
	 * subsequent test of guest_priority, and the setting
	 * of xc->cppr vs. subsequent test of xc->mfrr done inside
	 * scan_interrupts and push_pending_to_hw
	 */

	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);

		pr_devel(" EOI on saved P...\n");

		/* Clear old_p; that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);

		pr_devel(" EOI on source...\n");

		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));

	/*
	 * This barrier orders the above guest_priority check
	 * and spin_lock/unlock with clearing in_eoi below.
	 *
	 * It also has to be a full mb() as it must ensure
	 * the MMIOs done in source_eoi() are completed before
	 * state->in_eoi is visible.
	 */
	state->in_eoi = false;

	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);

	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);

	xc = vcpu->arch.xive_vcpu;

	/* Locklessly write over MFRR */

	/*
	 * The load of xc->cppr below and the subsequent MMIO store
	 * to the IPI must happen after the above mfrr update is
	 * globally visible so that:
	 *
	 * - We synchronize with another CPU doing an H_EOI or an H_CPPR
	 *   updating xc->cppr then reading xc->mfrr.
	 *
	 * - The target of the IPI sees the xc->mfrr update
	 */

	/* Shoot the IPI if more favored than the target CPPR */
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
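
	/*
	 * Interleaving sketch added for clarity (a summary, not text from
	 * the original file) of what the full barrier above, paired with
	 * the barriers described in h_cppr/h_eoi, guarantees:
	 *
	 *   CPU A (H_IPI)              CPU B (H_CPPR / H_EOI)
	 *   store xc->mfrr             store xc->cppr
	 *   full barrier               full barrier
	 *   load xc->cppr              load xc->mfrr (push_pending_to_hw)
	 *
	 * Whatever the interleaving, at least one side observes the
	 * other's store, so either A shoots the IPI itself or B raises
	 * the pending bit for the new MFRR; the MFRR update cannot be
	 * lost.
	 */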