// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}
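/*
 * An LR with no state bits set, but with EOI signalling requested and no
 * HW backing, describes an interrupt the guest has just EOIed: exactly
 * the condition that raises an EOI maintenance interrupt.
 */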
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}
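/*
 * Fold the state of the LRs back into the software view of each interrupt
 * after running the guest: sync the active/pending bits, forward EOIs of
 * level interrupts to any registered irqfd resamplers, and let mapped
 * interrupts be resampled if required.
 */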
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;
		bool deactivated;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
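/*
 * Pack the architecture-independent vgic_vmcr representation into the
 * ICH_VMCR_EL2 layout. The VAckCtl and VFIQEn bits are only under guest
 * control when a GICv2 is being emulated.
 */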
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE==1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE==1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}
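/*
 * Reset value presented to the guest in GICR_PENDBASER: an inner
 * cacheable, inner shareable mapping for the LPI pending table.
 */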
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
	if (dir_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
}
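/*
 * Read the pending state of an LPI from the guest's pending table,
 * transfer it into the software pending_latch, and clear the consumed
 * bit in guest memory.
 */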
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * The deactivation of the doorbell interrupt will trigger the
 * unmapping of the associated vPE.
 */
static void unmap_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	}
}
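/*
 * Re-activating the doorbell interrupt re-maps the associated vPE,
 * undoing unmap_all_vpes().
 */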
static void map_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
	}
}
/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all the vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

	/*
	 * A preparation for getting any VLPI states.
	 * The above vgic initialized check also ensures that the allocation
	 * and enabling of the doorbells have already been done.
	 */
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(dist);
		vlpi_avail = true;
	}

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		is_pending = irq->pending_latch;

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			goto out;
	}

out:
	if (vlpi_avail)
		map_all_vpes(dist);

	return ret;
}
/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of the region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}
/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		size_t sz = vgic_v3_rd_region_size(kvm, rdreg);

		if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
				       rdreg->base, SZ_64K, sz))
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}
/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * Stride between redistributors is 0 and regions are filled in the index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}
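/* Look up a redistributor region by its index. */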
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}
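/*
 * Validate the distributor and redistributor base addresses and register
 * the distributor MMIO device. Userspace must have fully initialized the
 * VGIC before this is called.
 */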
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	unsigned long c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %ld redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_debug("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_debug("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		return -EBUSY;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		return ret;
	}

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

	return 0;
}
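/*
 * Flipped in vgic_v3_probe() when any of the group0/group1/common/DIR
 * traps are enabled, switching the world switch onto the (slower)
 * sysreg trapping path.
 */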
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
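/*
 * Command-line tunables: the kvm-arm.vgic_v3_*_trap parameters force
 * trapping of the corresponding guest sysreg accesses, and
 * kvm-arm.vgic_v4_enable opts in to GICv4 direct interrupt injection.
 */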
static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
	bool has_v2;
	int ret;

	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
		kvm_info("GICv4%s support %sabled\n",
			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
			 gicv4_enable ? "en" : "dis");
	}

	kvm_vgic_global_state.vcpu_base = 0;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) {
		kvm_info("GICv3 with locally generated SEI\n");

		group0_trap = true;
		group1_trap = true;
		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
			dir_trap = true;
		else
			common_trap = true;
	}

	if (group0_trap || group1_trap || common_trap || dir_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C"  : "",
			 dir_trap    ? "D"  : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}
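/*
 * Restore the GIC CPU interface state saved by vgic_v3_put() when the
 * vcpu is scheduled back in.
 */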
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_activate_traps(cpu_if);

	WARN_ON(vgic_v4_load(vcpu));
}
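/*
 * Snapshot the VMCR from the hardware. When emulating GICv2 on GICv3
 * (SRE clear), the world switch already saves/restores the VMCR, so
 * there is nothing to do here.
 */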
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_deactivate_traps(cpu_if);
}