// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/adjust_pc.h>

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

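/*
 * Field extraction helpers for ICH_VTR_EL2: the low bits hold the
 * index of the last implemented List Register, and bits [28:26] hold
 * PREbits (the number of preemption bits minus one). With 5 bits of
 * preemption there is a single 32-bit active priority register per
 * group, with 6 there are two, and with 7 there are four, which is
 * what vtr_to_nr_apr_regs() computes.
 */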
#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

/* The LRs are system registers and cannot be indexed at runtime */
static u64 __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:		return read_gicreg(ICH_LR0_EL2);
	case 1:		return read_gicreg(ICH_LR1_EL2);
	case 2:		return read_gicreg(ICH_LR2_EL2);
	case 3:		return read_gicreg(ICH_LR3_EL2);
	case 4:		return read_gicreg(ICH_LR4_EL2);
	case 5:		return read_gicreg(ICH_LR5_EL2);
	case 6:		return read_gicreg(ICH_LR6_EL2);
	case 7:		return read_gicreg(ICH_LR7_EL2);
	case 8:		return read_gicreg(ICH_LR8_EL2);
	case 9:		return read_gicreg(ICH_LR9_EL2);
	case 10:	return read_gicreg(ICH_LR10_EL2);
	case 11:	return read_gicreg(ICH_LR11_EL2);
	case 12:	return read_gicreg(ICH_LR12_EL2);
	case 13:	return read_gicreg(ICH_LR13_EL2);
	case 14:	return read_gicreg(ICH_LR14_EL2);
	case 15:	return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:		write_gicreg(val, ICH_LR0_EL2);		break;
	case 1:		write_gicreg(val, ICH_LR1_EL2);		break;
	case 2:		write_gicreg(val, ICH_LR2_EL2);		break;
	case 3:		write_gicreg(val, ICH_LR3_EL2);		break;
	case 4:		write_gicreg(val, ICH_LR4_EL2);		break;
	case 5:		write_gicreg(val, ICH_LR5_EL2);		break;
	case 6:		write_gicreg(val, ICH_LR6_EL2);		break;
	case 7:		write_gicreg(val, ICH_LR7_EL2);		break;
	case 8:		write_gicreg(val, ICH_LR8_EL2);		break;
	case 9:		write_gicreg(val, ICH_LR9_EL2);		break;
	case 10:	write_gicreg(val, ICH_LR10_EL2);	break;
	case 11:	write_gicreg(val, ICH_LR11_EL2);	break;
	case 12:	write_gicreg(val, ICH_LR12_EL2);	break;
	case 13:	write_gicreg(val, ICH_LR13_EL2);	break;
	case 14:	write_gicreg(val, ICH_LR14_EL2);	break;
	case 15:	write_gicreg(val, ICH_LR15_EL2);	break;
	}
}

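/*
 * Active priority registers: ICH_AP0Rn_EL2 tracks Group-0 activations
 * and ICH_AP1Rn_EL2 tracks Group-1 activations, one bit per active
 * preemption level. As with the LRs, each register must be named
 * explicitly, hence the switches.
 */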
static void __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:		write_gicreg(val, ICH_AP0R0_EL2);	break;
	case 1:		write_gicreg(val, ICH_AP0R1_EL2);	break;
	case 2:		write_gicreg(val, ICH_AP0R2_EL2);	break;
	default:	write_gicreg(val, ICH_AP0R3_EL2);	break;
	}
}

static void __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:		write_gicreg(val, ICH_AP1R0_EL2);	break;
	case 1:		write_gicreg(val, ICH_AP1R1_EL2);	break;
	case 2:		write_gicreg(val, ICH_AP1R2_EL2);	break;
	default:	write_gicreg(val, ICH_AP1R3_EL2);	break;
	}
}

static u32 __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:		val = read_gicreg(ICH_AP0R0_EL2);	break;
	case 1:		val = read_gicreg(ICH_AP0R1_EL2);	break;
	case 2:		val = read_gicreg(ICH_AP0R2_EL2);	break;
	default:	val = read_gicreg(ICH_AP0R3_EL2);	break;
	}

	return val;
}

static u32 __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:		val = read_gicreg(ICH_AP1R0_EL2);	break;
	case 1:		val = read_gicreg(ICH_AP1R1_EL2);	break;
	case 2:		val = read_gicreg(ICH_AP1R2_EL2);	break;
	default:	val = read_gicreg(ICH_AP1R3_EL2);	break;
	}

	return val;
}

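/*
 * Save the guest's view of the LRs into the shadow copy and stuff the
 * hardware LRs with zero. ICH_ELRSR_EL2 has one status bit per LR; a
 * set bit means the LR is empty (the guest has consumed and EOIed the
 * interrupt), in which case only the state bits of the shadow copy
 * need clearing, saving a read-back of the LR.
 */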
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read
	 * the correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		fallthrough;
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		fallthrough;
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		fallthrough;
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		fallthrough;
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		fallthrough;
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		fallthrough;
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		fallthrough;
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		fallthrough;
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static int __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

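/*
 * Example: with 5 bits of preemption (ICH_VTR_EL2.PREbits == 4),
 * __vgic_v3_bpr_min() is 3, i.e. only bits [7:3] of a priority take
 * part in preemption decisions.
 */

/*
 * All the Group-0 accessors emulated here (ICC_IAR0_EL1, ICC_EOIR0_EL1,
 * ICC_HPPIR0_EL1 and ICC_AP0Rn_EL1) are encoded with CRm == 8, while
 * their Group-1 counterparts use other CRm values, so CRm is enough to
 * tell the two groups apart.
 */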
static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
					 u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu, int intid,
				    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

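/*
 * Example for __vgic_v3_get_highest_active_priority(): with a single
 * APR register per group and bit 2 set in ICH_AP1R0_EL2, __ffs() finds
 * bit 2, and with a minimum BPR of 3 the running priority is
 * 2 << 3 == 0x10.
 */
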
static unsigned int __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

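/*
 * Example for __vgic_v3_pri_to_pre(): a Group-1 interrupt of priority
 * 0x4a with BPR1 == 3 has its sub-priority bits [2:0] masked out,
 * giving a preemption level of 0x4a & (0xff << 3) == 0x48.
 */
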
/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

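/*
 * In __vgic_v3_set_active_priority() above, the normalized preemption
 * level selects a single bit across the (up to four) 32-bit APR
 * registers: ap / 32 picks the register and ap % 32 the bit within it,
 * mirroring the layout the hardware uses when it tracks active
 * priorities itself.
 */
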
static int __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

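/*
 * Emulate a read of ICC_IAR{0,1}_EL1: find the highest priority
 * pending interrupt in the LRs, check it against the group enables,
 * the PMR and the running priority, and if it is deliverable, mark it
 * active and return its INTID. Anything else reads as a spurious
 * INTID (1023).
 */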
static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

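/*
 * An EOI for an interrupt that no longer has a matching LR must still
 * be accounted for: incrementing ICH_HCR_EL2.EOIcount mirrors what the
 * hardware does in that situation.
 */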
static void __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1: deactivation is left to ICC_DIR_EL1 */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

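/*
 * Writes to the BPRs saturate at the minimum value the virtual CPU
 * interface supports: with 5 bits of preemption, for example, BPR1
 * cannot go below 3, and BPR0 (whose architectural minimum is one
 * less) cannot go below 2.
 */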
static void __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_read_hppir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __vgic_v3_read_rpr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

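/*
 * Entry point for the GICv3 CPU interface traps: decode the faulting
 * system register from the ESR, dispatch to the matching emulation
 * handler, and skip the trapped instruction on success. A return value
 * of 0 leaves the access to be handled by the rest of KVM's sysreg
 * emulation.
 */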
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_esr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}