/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock   must be taken with IRQs disabled
 *         kvm->lpi_list_lock     must be taken with IRQs disabled
 *           vgic_irq->irq_lock   must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */

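/*
 * A worked example of the "drop the lower lock and re-acquire" rule is
 * vgic_queue_irq_unlock() below: it drops the irq_lock, takes the target
 * VCPU's ap_list_lock, re-takes the irq_lock and then re-checks the
 * target, retrying if anything changed in between.
 */
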
/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid <= VGIC_MAX_SPI) {
		intid = array_index_nospec(intid, VGIC_MAX_SPI);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}

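/*
 * Usage sketch: every successful lookup must be balanced by a
 * vgic_put_irq() once the caller is done with the interrupt, e.g.:
 *
 *	irq = vgic_get_irq(kvm, vcpu, intid);
 *	if (!irq)
 *		return -EINVAL;
 *	spin_lock_irqsave(&irq->irq_lock, flags);
 *	...
 *	spin_unlock_irqrestore(&irq->irq_lock, flags);
 *	vgic_put_irq(kvm, irq);
 */
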
/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock_irqsave(&dist->lpi_list_lock, flags);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	kfree(irq);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
	bool line_level;

	BUG_ON(!irq->hw);

	if (irq->get_input_level)
		return irq->get_input_level(irq->intid);

	WARN_ON(irq_get_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      &line_level));
	return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
	BUG_ON(!irq->hw);
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_ACTIVE,
				      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/* If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

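/*
 * Note that list_sort() is a stable merge sort, so a 0 return from
 * vgic_irq_cmp() really does preserve the existing relative order.
 */
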
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

		spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive  true:  raise the input signal
 *                            false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}

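/*
 * Example (sketch, with a hypothetical in-kernel device as @owner): a
 * level-sensitive interrupt is asserted and later deasserted with:
 *
 *	kvm_vgic_inject_irq(kvm, 0, intid, true, owner);
 *	kvm_vgic_inject_irq(kvm, 0, intid, false, owner);
 *
 * For an edge-triggered interrupt only the level == true call queues
 * anything; level == false is filtered out by vgic_validate_injection().
 */
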
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq,
			    bool (*get_input_level)(int vintid))
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	irq->get_input_level = get_input_level;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->get_input_level = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, bool (*get_input_level)(int vintid))
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}

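/*
 * The arch timer is the typical in-kernel user of this mapping, tying its
 * hardware PPI to the corresponding virtual interrupt so the physical
 * line state can follow the virtual one.
 */
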
/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt. Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	if (!irq->hw)
		goto out;

	spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

	spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

retry:
	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		int w;

		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		w = vgic_irq_get_lr_count(irq);
		spin_unlock(&irq->irq_lock);

		count += w;
		*multi_sgi |= (w > 1);
	}
	return count;
}

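/*
 * A GICv2 SGI can be pending from several source CPUs at once, and each
 * source needs its own list register; that is why a single ap_list entry
 * can contribute more than one to the count above.
 */
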
/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool multi_sgi;
	u8 prio = 0xff;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source)
				prio = irq->priority;
		}

		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

static inline bool can_access_vgic_from_kernel(void)
{
	/*
	 * GICv2 can always be accessed from the kernel because it is
	 * memory-mapped, and VHE systems can access GICv3 EL2 system
	 * registers.
	 */
	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_save_state(vcpu);
	else
		__vgic_v3_save_state(vcpu);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	WARN_ON(vgic_v4_sync_hwstate(vcpu));

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (can_access_vgic_from_kernel())
		vgic_save_state(vcpu);

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_restore_state(vcpu);
	else
		__vgic_v3_restore_state(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	WARN_ON(vgic_v4_flush_hwstate(vcpu));

	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock.  There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing additional synchronization mechanism doesn't change
	 * this.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		goto out;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

out:
	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);
}

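/*
 * vcpu_load/vcpu_put hooks: hand the per-VCPU interface state (such as
 * the VMCR) to the physical GIC around a run of this VCPU.
 */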
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

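/* Check whether any enabled interrupt queued on this VCPU's ap_list is pending. */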
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}