/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
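/*
 * Illustrative sketch, not part of the original file: the level-IRQ
 * sampling rule described above, reduced to a single predicate. The
 * real logic lives in vgic_update_irq_pending() and
 * vgic_process_maintenance() below; this helper name is hypothetical.
 */
#if 0 /* example only */
static bool example_level_irq_can_fire(bool line_high, bool queued)
{
	/* While irq_queued is set, the input line is ignored... */
	if (queued)
		return false;
	/* ...and once the EOI maintenance path clears it, we sample again. */
	return line_high;
}
#endif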
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
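/*
 * Illustrative example, not part of the original file: an access mode
 * is the OR of one ACCESS_READ_* and one ACCESS_WRITE_* value, and the
 * two masks split it back apart.
 */
#if 0 /* example only */
static void example_access_mode(void)
{
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;

	WARN_ON(ACCESS_READ_MASK(mode) != ACCESS_READ_VALUE);
	WARN_ON(ACCESS_WRITE_MASK(mode) != ACCESS_WRITE_SETBIT);
}
#endif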
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
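/*
 * Illustrative example, not part of the original file: with
 * REG_OFFSET_SWIZZLE == 1, word indices 0,1,2,3 map to 1,0,3,2, i.e.
 * the two 32-bit halves of each 64-bit long are swapped back.
 */
#if 0 /* example only */
static u32 *example_swizzled_word(u32 *base, int index)
{
	return base + (index ^ REG_OFFSET_SWIZZLE);
}
#endif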
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
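/*
 * Illustrative usage, not part of the original file: this mirrors how
 * the maintenance path below walks a 64-bit status register such as
 * EISR as a bitmap (see vgic_process_maintenance(), which uses the
 * vgic_get_eisr() wrapper defined further down).
 */
#if 0 /* example only */
static void example_walk_eisr(struct kvm_vcpu *vcpu)
{
	u64 eisr = vgic_get_eisr(vcpu);
	unsigned long *eisr_ptr = u64_to_bitmask(&eisr); /* may modify eisr */
	int lr;

	for_each_set_bit(lr, eisr_ptr, vgic->nr_lr)
		; /* handle the EOIed LR here */
}
#endif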
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}

static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
	return 0;
}

static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
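/*
 * Illustrative example, not part of the original file: the mask narrows
 * the little-endian data window to the access size, so a byte access
 * passes 0xff and a halfword access 0xffff.
 */
#if 0 /* example only */
static u32 example_read_byte(struct kvm_exit_mmio *mmio)
{
	return mmio_data_read(mmio, 0xff);	/* mmio->len == 1 */
}
#endif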
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
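/*
 * Illustrative example, not part of the original file: a 2-byte guest
 * write at byte offset 2 of a word register only updates its upper
 * half; vgic_reg_access() computes word_offset == 16 and mask == 0xffff.
 */
#if 0 /* example only */
static void example_halfword_write(struct kvm_exit_mmio *mmio, u32 *reg)
{
	/* assumes mmio->len == 2 and (offset & 3) == 2 */
	vgic_reg_access(mmio, reg, 2, ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
}
#endif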
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg, orig;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg, orig;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * vcpu0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;

		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
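/*
 * Illustrative example, not part of the original file: how a single
 * GICD_ITARGETSRn byte collapses to one target vcpu. 0x06 (vcpu1|vcpu2)
 * picks vcpu1, the LSB; an empty byte falls back to vcpu0.
 */
#if 0 /* example only */
static int example_target_byte_to_vcpu(u8 targets)
{
	int target = ffs(targets);	/* 1-based bit number, 0 if empty */

	return target ? target - 1 : 0;
}
#endif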
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;

		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
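/*
 * Illustrative example, not part of the original file: expand and
 * compress are inverses on the bit we keep. 0xffff expands to
 * 0xaaaaaaaa (every odd bit set), which compresses back to 0xffff.
 */
#if 0 /* example only */
static void example_cfg_roundtrip(void)
{
	WARN_ON(vgic_cfg_expand(0xffff) != 0xaaaaaaaa);
	WARN_ON(vgic_cfg_compress(0xaaaaaaaa) != 0xffff);
}
#endif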
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			*vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);

		reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	int bits_per_irq;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_MAX_IRQS / 4,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct mmio_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}
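/*
 * Illustrative example, not part of the original file: with one bit per
 * irq (the enable/pending ranges above), byte offset 8 into a range
 * maps to irq 64, so a guest configured with nr_irqs == 64 must see
 * RAZ/WI there rather than a real register.
 */
#if 0 /* example only */
static int example_offset_to_irq(unsigned long offset, int bits_per_irq)
{
	return offset * 8 / bits_per_irq;	/* 8 * 8 / 1 == 64 */
}
#endif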
/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = range->handle_mmio(vcpu, mmio, offset);
	} else {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		updated_state = false;
	}
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, dist->irq_pending_on_cpu);
		}
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= dist->nr_irqs);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}
static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= dist->nr_irqs);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);

		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);

		return level != state;
	}
}
static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				    unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		} else {
			vgic_dist_irq_clear_pending(vcpu, irq_num);
		}
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (likely(vgic_initialized(kvm)) &&
	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}
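/*
 * Illustrative usage, not part of the original file: a device model
 * asserting and deasserting a level-sensitive SPI. cpuid only matters
 * for PPIs; SPI routing comes from the emulated GICD_ITARGETSRn state.
 */
#if 0 /* example only */
static void example_toggle_spi(struct kvm *kvm, unsigned int spi)
{
	kvm_vgic_inject_irq(kvm, 0, spi, true);		/* raise the line */
	kvm_vgic_inject_irq(kvm, 0, spi, false);	/* lower it again */
}
#endif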
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	return 0;
}
/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	for (i = 0; i < dist->nr_irqs; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	vgic_enable(vcpu);
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
}
/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
static int vgic_init_maps(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i;

	if (dist->nr_cpus)	/* Already allocated */
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -EINVAL;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.  Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = vgic_init_maps(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_init(vcpu);

	kvm->arch.vgic.ready = true;
out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
	{}
};
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init_maps(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	}

	return -ENXIO;
}
static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}
static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	}
	return -ENXIO;
}
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm);
}

static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
				       KVM_DEV_TYPE_ARM_VGIC_V2);

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}