// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

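/*
 * Worked example of the ICH_VTR_EL2 arithmetic above (values chosen
 * for illustration): a VTR reporting PREbits == 6 means
 * vtr_to_nr_pre_bits() == 7 preemption bits, so
 * vtr_to_nr_apr_regs() == 1 << (7 - 5) == 4 active priority
 * registers per group (ICH_AP{0,1}R0..3_EL2). At the architectural
 * minimum of 5 preemption bits, a single APR per group suffices.
 */
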
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:	 return read_gicreg(ICH_LR0_EL2);
	case 1:	 return read_gicreg(ICH_LR1_EL2);
	case 2:	 return read_gicreg(ICH_LR2_EL2);
	case 3:	 return read_gicreg(ICH_LR3_EL2);
	case 4:	 return read_gicreg(ICH_LR4_EL2);
	case 5:	 return read_gicreg(ICH_LR5_EL2);
	case 6:	 return read_gicreg(ICH_LR6_EL2);
	case 7:	 return read_gicreg(ICH_LR7_EL2);
	case 8:	 return read_gicreg(ICH_LR8_EL2);
	case 9:	 return read_gicreg(ICH_LR9_EL2);
	case 10: return read_gicreg(ICH_LR10_EL2);
	case 11: return read_gicreg(ICH_LR11_EL2);
	case 12: return read_gicreg(ICH_LR12_EL2);
	case 13: return read_gicreg(ICH_LR13_EL2);
	case 14: return read_gicreg(ICH_LR14_EL2);
	case 15: return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

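/*
 * System register accesses encode the register number in the
 * instruction itself, so ICH_LR<n>_EL2 cannot be indexed by a
 * run-time value: hence the switch statements here rather than an
 * array lookup.
 */
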
static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:	 write_gicreg(val, ICH_LR0_EL2);  break;
	case 1:	 write_gicreg(val, ICH_LR1_EL2);  break;
	case 2:	 write_gicreg(val, ICH_LR2_EL2);  break;
	case 3:	 write_gicreg(val, ICH_LR3_EL2);  break;
	case 4:	 write_gicreg(val, ICH_LR4_EL2);  break;
	case 5:	 write_gicreg(val, ICH_LR5_EL2);  break;
	case 6:	 write_gicreg(val, ICH_LR6_EL2);  break;
	case 7:	 write_gicreg(val, ICH_LR7_EL2);  break;
	case 8:	 write_gicreg(val, ICH_LR8_EL2);  break;
	case 9:	 write_gicreg(val, ICH_LR9_EL2);  break;
	case 10: write_gicreg(val, ICH_LR10_EL2); break;
	case 11: write_gicreg(val, ICH_LR11_EL2); break;
	case 12: write_gicreg(val, ICH_LR12_EL2); break;
	case 13: write_gicreg(val, ICH_LR13_EL2); break;
	case 14: write_gicreg(val, ICH_LR14_EL2); break;
	case 15: write_gicreg(val, ICH_LR15_EL2); break;
	}
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0: write_gicreg(val, ICH_AP0R0_EL2); break;
	case 1: write_gicreg(val, ICH_AP0R1_EL2); break;
	case 2: write_gicreg(val, ICH_AP0R2_EL2); break;
	case 3: write_gicreg(val, ICH_AP0R3_EL2); break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0: write_gicreg(val, ICH_AP1R0_EL2); break;
	case 1: write_gicreg(val, ICH_AP1R1_EL2); break;
	case 2: write_gicreg(val, ICH_AP1R2_EL2); break;
	case 3: write_gicreg(val, ICH_AP1R3_EL2); break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0: val = read_gicreg(ICH_AP0R0_EL2); break;
	case 1: val = read_gicreg(ICH_AP0R1_EL2); break;
	case 2: val = read_gicreg(ICH_AP0R2_EL2); break;
	case 3: val = read_gicreg(ICH_AP0R3_EL2); break;
	default: unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0: val = read_gicreg(ICH_AP1R0_EL2); break;
	case 1: val = read_gicreg(ICH_AP1R1_EL2); break;
	case 2: val = read_gicreg(ICH_AP1R2_EL2); break;
	case 3: val = read_gicreg(ICH_AP1R3_EL2); break;
	default: unreachable();
	}

	return val;
}

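/*
 * ICH_AP0Rn_EL2 tracks the active priorities of Group-0 virtual
 * interrupts, ICH_AP1Rn_EL2 those of Group-1, one bit per preemption
 * level. As with the LRs above, the register number is part of the
 * instruction encoding, hence one accessor per register.
 */
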
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			/* A set ELRSR bit means the LR holds no live interrupt */
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read
	 * the correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * actually been programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		/* Fall through */
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		/* Fall through */
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		/* Fall through */
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		/* Fall through */
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		/* Fall through */
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		/* Fall through */
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		/* Fall through */
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		/* Fall through */
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

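/*
 * For example, with 7 preemption bits implemented,
 * __vgic_v3_bpr_min() returns 8 - 7 = 1: in the masking scheme used
 * by __vgic_v3_pri_to_pre() below, only bits [7:1] of a priority
 * then take part in preemption.
 */
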
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	/* The Group-0 ICC registers are all encoded with CRm == 8 */
	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

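/*
 * Example of the scan above (illustrative values): if ICH_AP1R0_EL2
 * reads as 0x10 and every other APR is empty, __ffs() finds bit 4,
 * so the highest active preemption level is 4; rescaling by
 * __vgic_v3_bpr_min() turns it back into an 8-bit priority, e.g.
 * 4 << 1 == 8 when 7 preemption bits are implemented.
 */
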
static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

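/*
 * The CBPR case above follows the architecture: when
 * ICH_VMCR_EL2.VCBPR is set, Group-1 interrupts use the Group-0 BPR
 * plus one, saturating at 7, which is what a guest read of
 * ICC_BPR1_EL1 would observe with ICC_CTLR_EL1.CBPR == 1.
 */
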
/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

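/*
 * Worked example (illustrative values): with a Group-1 BPR of 3,
 * pri == 0x9b folds to 0x9b & (0xff << 3) == 0x98; the low three
 * sub-priority bits play no part in preemption.
 */
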
/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

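/*
 * Example of the mapping above (illustrative values): a preemption
 * level of 0x40 with a minimum BPR of 1 gives ap == 0x20, i.e. bit 0
 * of the second active priority register (apr == 1), the same layout
 * the hardware uses when it activates an interrupt itself.
 */
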
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

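/*
 * When ICH_LR_HW is set, the LR is linked to a physical interrupt,
 * and deactivating the virtual interrupt must also deactivate the
 * physical one: gic_write_dir() above writes the physical INTID to
 * ICC_DIR_EL1 to do just that.
 */
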
static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	/* Writes to BPR1 are ignored when CBPR is set */
	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

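/*
 * Example of the BPR limiting above (illustrative values): with 7
 * preemption bits implemented, __vgic_v3_bpr_min() is 1, so a guest
 * writing 0 to ICC_BPR1_EL1 reads back 1, while a write of 0 to
 * ICC_BPR0_EL1 stands, the minimum BPR0 being one less than the
 * minimum BPR1.
 */
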
static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}