// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;
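/*
 * These flags are wired up to the "kvm-arm.*" early parameters
 * registered at the bottom of this file.
 */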
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}
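/*
 * An LR signals an EOI maintenance interrupt when it is in the invalid
 * state (no pending/active bits), has EOI notification enabled, and is
 * not backed by a HW interrupt.
 */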
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}
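/*
 * Fold the state left in the List Registers back into the software
 * model of each interrupt after a guest exit. Must be called with
 * interrupts disabled.
 */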
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * anyway.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
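/*
 * Pack the generic struct vgic_vmcr fields into the ICH_VMCR_EL2 layout.
 * GICv2 guests get to control VAckCtl and VFIQEn; for native GICv3
 * guests those bits are architecturally fixed (RES0/RES1).
 */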
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE==1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE==1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}
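/*
 * Reset value for GICR_PENDBASER: inner cacheable (read-allocate,
 * write-back), outer cacheability the same as inner, inner shareable.
 */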
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}
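/*
 * Sync the pending state of one LPI from the guest's pending table into
 * the software model, clearing the table bit once it has been consumed.
 */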
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * kvm lock and all vcpu locks must be held
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int last_byte_offset = -1;
	struct vgic_irq *irq;
	int ret;
	u8 val;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_byte_offset = byte_offset;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}
/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
		    rdreg->base)
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}
/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in the index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}
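/*
 * Look up the registered redistributor region matching a given region
 * index, returning NULL if no such region has been registered.
 */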
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}
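/*
 * Check that userspace has supplied and initialized all the addresses
 * the emulated GICv3 needs, then register the distributor MMIO regions.
 */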
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	int c;

	if (vgic_ready(kvm))
		goto out;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %d redistributor base not set\n", c);
			ret = -ENXIO;
			goto out;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	return ret;
}
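/*
 * The trap configuration is global: a static key keeps the common case
 * (no GICv3 sysreg trapping) off the fast path.
 */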
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error
 * code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_info("GICv4 support %sabled\n",
			 gicv4_enable ? "en" : "dis");
	}

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

#ifdef CONFIG_ARM64
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}
#endif

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C"  : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}
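/*
 * Restore the GIC CPU interface state for this vcpu before entering the
 * guest; vgic_v3_put() below performs the matching save on vcpu_put.
 */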
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);

	if (has_vhe())
		__vgic_v3_activate_traps(vcpu);
}
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);

	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);

	if (has_vhe())
		__vgic_v3_deactivate_traps(vcpu);
}