// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *         kvm->lpi_list_lock		must be taken with IRQs disabled
 *           vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */
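
/*
 * For example (illustrative sketch, mirroring what vgic_queue_irq_unlock()
 * below actually does): code holding an irq_lock that needs the
 * higher-ranking ap_list_lock must back off and re-take the locks in order:
 *
 *	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 *	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 *	raw_spin_lock(&irq->irq_lock);
 *
 * and must then re-check any state that may have changed in between.
 */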

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
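
/*
 * Illustrative use (sketch, not upstream code): every successful lookup
 * must be paired with a vgic_put_irq() once the reference is no longer
 * needed, e.g.:
 *
 *	irq = vgic_get_irq(kvm, vcpu, intid);
 *	if (!irq)
 *		return -EINVAL;
 *	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 *	... inspect or update irq state ...
 *	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 *	vgic_put_irq(kvm, irq);
 */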

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

/*
 * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
 */
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (!kref_put(&irq->refcount, vgic_irq_release))
		return;

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;

	kfree(irq);
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	__vgic_put_lpi_locked(kvm, irq);
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		if (irq->intid >= VGIC_MIN_LPI) {
			raw_spin_lock(&irq->irq_lock);
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);
			vgic_put_irq(vcpu->kvm, irq);
		}
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
	bool line_level;

	BUG_ON(!irq->hw);

	if (irq->get_input_level)
		return irq->get_input_level(irq->intid);

	WARN_ON(irq_get_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      &line_level));
	return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
	BUG_ON(!irq->hw);
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_ACTIVE,
				      active));
}

/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	lockdep_assert_held(&irq->irq_lock);

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}
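
/*
 * For example (illustrative): an enabled, pending, inactive SPI whose
 * target_vcpu is vcpu1 routes to vcpu1; the same interrupt with the
 * distributor disabled routes nowhere (NULL); and once it is active on
 * vcpu0, it stays on vcpu0 regardless of any affinity change.
 */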

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/*
	 * list_sort may call this function with the same element when
	 * the list is fairly long.
	 */
	if (unlikely(irqa == irqb))
		return 0;

	raw_spin_lock(&irqa->irq_lock);
	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	raw_spin_unlock(&irqb->irq_lock);
	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}
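
/*
 * For example (illustrative): with an active IRQ A, a pending IRQ B at
 * priority 0x20 and a pending IRQ C at priority 0x80, the sorted list is
 * A, B, C: active interrupts first, then pending ones by ascending
 * priority value (a lower value is a higher GIC priority).
 */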

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}
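
/*
 * For example (illustrative): injecting level == true twice in a row on a
 * level-sensitive line is filtered the second time (the line is already
 * high), and injecting level == false on an edge-triggered line is always
 * a no-op.
 */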

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	lockdep_assert_held(&irq->irq_lock);

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	raw_spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
					   flags);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}
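
/*
 * Illustrative calling convention (sketch): callers take the irq_lock with
 * interrupts disabled, update the pending state, and then let
 * vgic_queue_irq_unlock() drop every lock on their behalf:
 *
 *	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 *	irq->pending_latch = true;
 *	vgic_queue_irq_unlock(kvm, irq, flags);
 *
 * kvm_vgic_inject_irq() below follows exactly this pattern.
 */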

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}
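
/*
 * Illustrative use (sketch, hypothetical names): an in-kernel device model
 * raising and then lowering a level-sensitive SPI it owns might do:
 *
 *	ret = kvm_vgic_inject_irq(kvm, 0, example_spi, true, example_dev);
 *	... the guest services the device ...
 *	ret = kvm_vgic_inject_irq(kvm, 0, example_spi, false, example_dev);
 *
 * The cpuid is only meaningful for private interrupts (PPIs); for SPIs it
 * is effectively ignored.
 */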

/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq,
			    bool (*get_input_level)(int vintid))
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	irq->get_input_level = get_input_level;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->get_input_level = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, bool (*get_input_level)(int vintid))
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}
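
/*
 * Illustrative use (sketch, placeholder names): a subsystem forwarding a
 * hardware interrupt, such as the arch timer, maps the host Linux IRQ to
 * the guest INTID once per VCPU:
 *
 *	ret = kvm_vgic_map_phys_irq(vcpu, host_timer_irq, timer_intid,
 *				    timer_get_input_level);
 *
 * after which the vgic keeps the virtual and physical state of the
 * interrupt in sync.
 */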

/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt. Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	if (!irq->hw)
		goto out;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}
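
/*
 * Illustrative use (sketch, placeholder names): an in-kernel device claims
 * its interrupt once at init time, using any stable pointer as the owner
 * cookie:
 *
 *	ret = kvm_vgic_set_owner(vcpu, ppi_intid, example_dev);
 *
 * Later injections must pass the same cookie, so userspace injections
 * (owner == NULL) of this INTID are rejected by vgic_validate_injection().
 */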

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		raw_spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				     SINGLE_DEPTH_NESTING);
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	raw_spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	lockdep_assert_held(&irq->irq_lock);

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		int w;

		raw_spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		w = vgic_irq_get_lr_count(irq);
		raw_spin_unlock(&irq->irq_lock);

		count += w;
		*multi_sgi |= (w > 1);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool multi_sgi;
	u8 prio = 0xff;
	int i = 0;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			raw_spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source)
				prio = irq->priority;
		}

		raw_spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	/* Nuke remaining LRs */
	for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_clear_lr(vcpu, i);

	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
	else
		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
}

static inline bool can_access_vgic_from_kernel(void)
{
	/*
	 * GICv2 can always be accessed from the kernel because it is
	 * memory-mapped, and VHE systems can access GICv3 EL2 system
	 * registers.
	 */
	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_save_state(vcpu);
	else
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	int used_lrs;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (can_access_vgic_from_kernel())
		vgic_save_state(vcpu);

	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
	else
		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;

	if (used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_restore_state(vcpu);
	else
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock. There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing additional synchronization mechanism doesn't change
	 * this.
	 *
	 * Note that we still need to go through the whole thing if anything
	 * can be directly injected (GICv4).
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
	    !vgic_supports_direct_msis(vcpu->kvm))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
		vgic_flush_lr_state(vcpu);
		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
	}

	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);

	if (vgic_supports_direct_msis(vcpu->kvm))
		vgic_v4_commit(vcpu);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
{
	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_vmcr_sync(vcpu);
	else
		vgic_v3_vmcr_sync(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;
	struct vgic_vmcr vmcr;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	vgic_get_vmcr(vcpu, &vmcr);

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled &&
			  !irq->active &&
			  irq->priority < vmcr.pmr;
		raw_spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}