// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <linux/irqdomain.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP	2

/*
 * Push a vcpu's context to the XIVE on guest entry.
 * This assumes we are in virtual mode (MMU on)
 */
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
	u64 pq;

	/*
	 * Nothing to do if the platform doesn't have a XIVE
	 * or this vCPU doesn't have its own XIVE context
	 * (e.g. because it's not using an in-kernel interrupt controller).
	 */
	if (!tima || !vcpu->arch.xive_cam_word)
		return;

	eieio();
	__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
	vcpu->arch.xive_pushed = 1;
	eieio();

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	vcpu->arch.irq_pending = 0;

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	if (vcpu->arch.xive_esc_on) {
		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
						  XIVE_ESB_SET_PQ_01));
		mb();

		/*
		 * We have a possible subtle race here: The escalation
		 * interrupt might have fired and be on its way to the
		 * host queue while we mask it, and if we unmask it
		 * early enough (re-cede right away), there is a
		 * theoretical possibility that it fires again, thus
		 * landing in the target queue more than once which is
		 * a big no-no.
		 *
		 * Fortunately, solving this is rather easy. If the
		 * above load setting PQ to 01 returns a previous
		 * value where P is set, then we know the escalation
		 * interrupt is somewhere on its way to the host. In
		 * that case we simply don't clear the xive_esc_on
		 * flag below. It will be eventually cleared by the
		 * handler for the escalation interrupt.
		 *
		 * Then, when doing a cede, we check that flag again
		 * before re-enabling the escalation interrupt, and if
		 * set, we abort the cede.
		 */
		if (!(pq & XIVE_ESB_VAL_P))
			/* Now P is 0, we can clear the flag */
			vcpu->arch.xive_esc_on = 0;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
 * Pull a vcpu's context from the XIVE on guest exit.
 * This assumes we are in virtual mode (MMU on)
 */
void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;

	if (!vcpu->arch.xive_pushed)
		return;

	/*
	 * Should not have been pushed if there is no tima
	 */
	if (WARN_ON(!tima))
		return;

	eieio();
	/* First load to pull the context, we ignore the value */
	__raw_readl(tima + TM_SPC_PULL_OS_CTX);
	/* Second load to recover the context state (Words 0 and 1) */
	vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);

	/* Fixup some of the state for the next load */
	vcpu->arch.xive_saved_state.lsmfb = 0;
	vcpu->arch.xive_saved_state.ack = 0xff;
	vcpu->arch.xive_pushed = 0;
	eieio();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);

void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
{
	void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;

	if (!esc_vaddr)
		return;

	/* we are using XIVE with single escalation */

	if (vcpu->arch.xive_esc_on) {
		/*
		 * If we still have a pending escalation, abort the cede,
		 * and we must set PQ to 10 rather than 00 so that we don't
		 * potentially end up with two entries for the escalation
		 * interrupt in the XIVE interrupt queue. In that case
		 * we also don't want to set xive_esc_on to 1 here in
		 * case we race with xive_esc_irq().
		 */
		vcpu->arch.ceded = 0;
		/*
		 * The escalation interrupts are special as we don't EOI them.
		 * There is no need to use the load-after-store ordering offset
		 * to set PQ to 10 as we won't use StoreEOI.
		 */
		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
	} else {
		vcpu->arch.xive_esc_on = true;
		mb();
		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
	}
	mb();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/* Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ combination
	 * of 10 is observed while xive_esc_on is true, it means the queue
	 * contains an unprocessed escalation interrupt. We don't make use of
	 * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
	 */
	vcpu->arch.xive_esc_on = false;

	/* This orders xive_esc_on = false vs. subsequent stale_p = true */
	smp_wmb();	/* goes with smp_mb() in cleanup_single_escalation */

	return IRQ_HANDLED;
}

int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/* In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_FLAG_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_FLAG_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve infos on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

/* Called with xive->lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&xive->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			kvmppc_xive_attach_escalation(vcpu, prio,
						      xive->single_escalation);

		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

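/*
 * Bump the "pending" counter on @server/@prio's queue. Used when
 * retargetting an interrupt: the old target keeps this count so its
 * queue accounting can be adjusted once the queue is observed empty.
 */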
static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

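/*
 * Try to reserve a slot in @vcpu's queue for priority @prio.
 * Returns 0 on success, -ENXIO if the vcpu has no usable XIVE state or
 * queue page, and -EBUSY if the queue is full (keeping XIVE_Q_GAP
 * entries spare for the IPI and as a safety guard).
 */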
static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;
	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel("  found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel("  no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

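/*
 * Lock the source block and mask the interrupt: PQ is set to 10, the
 * previous P/Q bits are saved in state->old_p/old_q, and the previous
 * guest priority is returned. Spins if racing with an H_EOI in
 * progress and returns with sb->lock held.
 */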
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/* Set PQ to 10, return old P and old Q and remember them */
	val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
	state->old_p = !!(val & 2);
	state->old_q = !!(val & 1);

	/*
	 * Synchronize hardware to ensure the queues are updated when
	 * masking
	 */
	xive_native_sync_source(hw_num);

	return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

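/*
 * Complete an unmask started with xive_lock_for_unmask(): restore the
 * saved Q bit if needed, perform an effective EOI when the saved P bit
 * was clear, and finally set the new guest priority. Called with
 * sb->lock held.
 */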
static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto skip;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI,
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
skip:
	state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targetting
 * updates as needed
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = kvmppc_xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 kvmppc_xive_vp(xive, server),
					 prio, state->number);
}

/*
 * Targetting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targetted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targetting
 *   from masking, we only handle accounting during (re)targetting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */

int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED) {
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm,
			      xive_prio_from_guest(priority));
		mutex_unlock(&xive->lock);
	}
	if (rc) {
		pr_devel("  provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targetting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targetting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targetted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we handle targetting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything,
	 *
	 * The condition for re-targetting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargetted. An attempt of doing an int_on on an
	 *       untargetted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with valid default
	 *       priorities.
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally Update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targetted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargetted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

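/*
 * Re-trigger a source that was found pending in the saved XICS XIRR on
 * migration restore. Returns false if the source doesn't exist yet or
 * isn't valid, in which case the caller records it as a delayed irq.
 */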
static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen because the vcpu->mutex makes running a
	 * vcpu mutually exclusive with doing one_reg get/set on it.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been setup yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel("  xisr restore delayed\n");
	}

	return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we don't really care.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped)
		xive->ops->reset_mapped(kvm, guest_irq);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targetted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped) {
		xive->ops->reset_mapped(kvm, guest_irq);
	}

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}

	/* Disable vcpu's escalation interrupt */
	if (vcpu->arch.xive_esc_on) {
		__raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
					     XIVE_ESB_SET_PQ_01));
		vcpu->arch.xive_esc_on = false;
	}

	/*
	 * Clear pointers to escalation interrupt ESB.
	 * This is safe because the vcpu->mutex is held, preventing
	 * any other CPU from concurrently executing a KVM_RUN ioctl.
	 */
	vcpu->arch.xive_esc_vaddr = 0;
	vcpu->arch.xive_esc_raddr = 0;
}

/*
 * In single escalation mode, the escalation interrupt is marked so
 * that EOI doesn't re-enable it, but just sets the stale_p flag to
 * indicate that the P bit has already been dealt with. However, the
 * assembly code that enters the guest sets PQ to 00 without clearing
 * stale_p (because it has no easy way to address it). Hence we have
 * to adjust stale_p before shutting down the interrupt.
 */
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
				    struct kvmppc_xive_vcpu *xc, int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * This slightly odd sequence gives the right result
	 * (i.e. stale_p set if xive_esc_on is false) even if
	 * we race with xive_esc_irq() and xive_irq_eoi().
	 */
	xd->stale_p = false;
	smp_mb();		/* paired with smp_wmb in xive_esc_irq */
	if (!vcpu->arch.xive_esc_on)
		xd->stale_p = true;
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	int i;

	if (!kvmppc_xics_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Free escalations */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		if (xc->esc_virq[i]) {
			if (xc->xive->single_escalation)
				xive_cleanup_single_escalation(vcpu, xc,
							xc->esc_virq[i]);
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
	}

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Clear the cam word so guest entry won't try to push context */
	vcpu->arch.xive_cam_word = 0;

	/* Free the queues */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}

	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}

static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
	/* We have a block of xive->nr_servers VPs. We just need to check
	 * packed vCPU ids are below that.
	 */
	return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
}

int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
{
	u32 vp_id;

	if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}

	if (xive->vp_base == XIVE_INVALID_VP) {
		xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
		pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);

		if (xive->vp_base == XIVE_INVALID_VP)
			return -ENOSPC;
	}

	vp_id = kvmppc_xive_vp(xive, cpu);
	if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}

	*vp = vp_id;

	return 0;
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;
	u32 vp_id;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&xive->lock);

	r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
	if (r)
		goto bail;

	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc) {
		r = -ENOMEM;
		goto bail;
	}

	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = vp_id;
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && xive->single_escalation)
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				kvmppc_xive_attach_escalation(
					vcpu, i, xive->single_escalation);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&xive->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently. If this becomes a
	 * performance issue we can probably remove the lock.
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about how this
	 * works. Collect a stable state for all interrupts
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	int i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}

/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * So to properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, mask them all (& save their previous PQ state)
	 * to get a stable state in the HW, then sync them to ensure that
	 * any interrupt that had already fired hits its queue, and finally
	 * scan all the queues to collect which interrupts are still present
	 * in the queues, so we can set the "pending" flag on them and
	 * they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}

struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
	struct kvmppc_xive *xive, int irq)
{
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&xive->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].eisn = 0;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&xive->lock);
	return xive->src_blocks[bid];
}

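/*
 * Check whether @irq was stashed as a delayed irq in some vCPU's ICP
 * state by kvmppc_xive_set_icp(); if so, clear it and tell the caller
 * to treat the source as pending.
 */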
static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = kvmppc_xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user passed data */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargetted. It means that an interrupt
	 * can become "untargetted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargetted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertized to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targetting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel("  Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel("  LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel("  P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, update guest priority and
	 * perform the appropriate state transition and do a
	 * re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel("  masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel("  unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = true;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = false;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}

int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
{
	u32 __user *ubufp = (u32 __user *) addr;
	u32 nr_servers;
	int rc = 0;

	if (get_user(nr_servers, ubufp))
		return -EFAULT;

	pr_devel("%s nr_servers=%u\n", __func__, nr_servers);

	if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
		return -EINVAL;

	mutex_lock(&xive->lock);
	if (xive->vp_base != XIVE_INVALID_VP)
		/* The VP block is allocated once and freed when the device
		 * is released. Better not allow to change its size since it's
		 * used by connect_vcpu to validate vCPU ids are valid (eg,
		 * setting it back to a higher value could allow connect_vcpu
		 * to come up with a VP id that goes beyond the VP block, which
		 * is likely to cause a crash in OPAL).
		 */
		rc = -EBUSY;
	else if (nr_servers > KVM_MAX_VCPUS)
		/* We don't need more servers. Higher vCPU ids get packed
		 * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
		 */
		xive->nr_servers = KVM_MAX_VCPUS;
	else
		xive->nr_servers = nr_servers;

	mutex_unlock(&xive->lock);

	return rc;
}

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	case KVM_DEV_XICS_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XICS_NR_SERVERS:
			return kvmppc_xive_set_nr_servers(xive, attr->addr);
		}
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	case KVM_DEV_XICS_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XICS_NR_SERVERS:
			return 0;
		}
	}
	return -ENXIO;
}

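/* Mask the source (PQ=01) and detarget it in the hardware */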
static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
}

void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_cleanup_irq_data(&state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too but keep IRQ hw data */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}

/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xive_release(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	pr_devel("Releasing xive device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device. Therefore there can not be any of the device
	 * attribute set/get functions being executed concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */

	debugfs_remove(xive->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also means that the vcpu cannot
		 * be executing the KVM_RUN ioctl, and therefore it cannot
		 * be executing the XIVE push or pull code or accessing
		 * the XIVE MMIO regions.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xive_cleanup_vcpu(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	/*
	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
	 * against xive code getting called during vcpu execution or
	 * set/get one_reg operations.
	 */
	kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	/*
	 * A reference of the kvmppc_xive pointer is now kept under
	 * the xive_devices struct of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */

	kfree(dev);
}

/*
 * When the guest chooses the interrupt mode (XICS legacy or XIVE
 * native), the VM will switch KVM devices. The previous device will
 * be "released" before the new one is created.
 *
 * Until we are sure all execution paths are well protected, provide a
 * fail safe (transitional) method for device destruction, in which
 * the XIVE device pointer is recycled and not directly freed.
 */
struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
{
	struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
		&kvm->arch.xive_devices.native :
		&kvm->arch.xive_devices.xics_on_xive;
	struct kvmppc_xive *xive = *kvm_xive_device;

	if (!xive) {
		xive = kzalloc(sizeof(*xive), GFP_KERNEL);
		*kvm_xive_device = xive;
	} else {
		memset(xive, 0, sizeof(*xive));
	}

	return xive;
}

/*
 * Create a XICS device with XIVE backend. kvm->lock is held.
 */
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xive for partition\n");

	/* Already there ? */
	if (kvm->arch.xive)
		return -EEXIST;

	xive = kvmppc_xive_get_device(kvm, type);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;
	mutex_init(&xive->lock);

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* VP allocation is delayed to the first call to connect_vcpu */
	xive->vp_base = XIVE_INVALID_VP;
	/* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per sockets
	 * on a POWER9 system.
	 */
	xive->nr_servers = KVM_MAX_VCPUS;

	xive->single_escalation = xive_native_has_single_escalation();

	kvm->arch.xive = xive;
	return 0;
}

int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* The VM should have configured XICS mode before doing XICS hcalls. */
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;

	switch (req) {
	case H_XIRR:
		return xive_vm_h_xirr(vcpu);
	case H_CPPR:
		return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_EOI:
		return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_IPI:
		return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
					  kvmppc_get_gpr(vcpu, 5));
	case H_IPOLL:
		return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_XIRR_X:
		xive_vm_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
		return H_SUCCESS;
	}

	return H_UNSUPPORTED;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);

int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int i;

	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];
		u32 i0, i1, idx;

		if (!q->qpage && !xc->esc_virq[i])
			continue;

		if (q->qpage) {
			seq_printf(m, "    q[%d]: ", i);
			idx = q->idx;
			i0 = be32_to_cpup(q->qpage + idx);
			idx = (idx + 1) & q->msk;
			i1 = be32_to_cpup(q->qpage + idx);
			seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
				   i0, i1);
		}
		if (xc->esc_virq[i]) {
			struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
			struct xive_irq_data *xd =
				irq_data_get_irq_handler_data(d);
			u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

			seq_printf(m, "    ESC %d %c%c EOI @%llx",
				   xc->esc_virq[i],
				   (pq & XIVE_ESB_VAL_P) ? 'P' : '-',
				   (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-',
				   xd->eoi_page);
			seq_puts(m, "\n");
		}
	}
	return 0;
}

void kvmppc_xive_debug_show_sources(struct seq_file *m,
				    struct kvmppc_xive_src_block *sb)
{
	int i;

	seq_puts(m, "    LISN      HW/CHIP   TYPE    PQ      EISN    CPU/PRIO\n");
	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
		struct xive_irq_data *xd;
		u64 pq;
		u32 hw_num;

		if (!state->valid)
			continue;

		kvmppc_xive_select_irq(state, &hw_num, &xd);

		pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

		seq_printf(m, "%08x  %08x/%02x", state->number, hw_num,
			   xd->src_chip);
		if (state->lsi)
			seq_printf(m, " %cLSI", state->asserted ? '^' : ' ');
		else
			seq_puts(m, "  MSI");

		seq_printf(m, " %s  %c%c  %08x   % 4d/%d",
			   state->ipi_number == hw_num ? "IPI" : " PT",
			   pq & XIVE_ESB_VAL_P ? 'P' : '-',
			   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
			   state->eisn, state->act_server,
			   state->act_priority);

		seq_puts(m, "\n");
	}
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_puts(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "VCPU %d: VP:%#x/%02x\n"
			   "    CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->vp_id, xc->vp_chip_id,
			   xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);

		kvmppc_xive_debug_show_queues(m, vcpu);

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_puts(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	seq_puts(m, "=========\nSources\n=========\n");

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		arch_spin_lock(&sb->lock);
		kvmppc_xive_debug_show_sources(m, sb);
		arch_spin_unlock(&sb->lock);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_debug);

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.release = kvmppc_xive_release,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};