/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
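
/*
 * GICv2 world-switch helpers. These functions run in the HYP (EL2)
 * context (__hyp_text), so every kernel pointer they touch must first be
 * translated with kern_hyp_va().
 */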

static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
					    void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u32 eisr0, eisr1;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & GICH_HCR_UIE);
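
	/*
	 * A maintenance interrupt is also expected if any in-use list
	 * register holds a software-originated interrupt with EOI
	 * notification enabled.
	 */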
	for (i = 0; i < used_lrs && !expect_mi; i++)
		expect_mi |= (!(cpu_if->vgic_lr[i] & GICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & GICH_LR_EOI));
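
	/*
	 * Only pay for the MISR/EISR MMIO reads when a maintenance
	 * interrupt can actually have been raised.
	 */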
	if (expect_mi) {
		cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);

		if (cpu_if->vgic_misr & GICH_MISR_EOI) {
			eisr0 = readl_relaxed(base + GICH_EISR0);
			if (unlikely(used_lrs > 32))
				eisr1 = readl_relaxed(base + GICH_EISR1);
			else
				eisr1 = 0;
		} else {
			eisr0 = eisr1 = 0;
		}
	} else {
		cpu_if->vgic_misr = 0;
		eisr0 = eisr1 = 0;
	}

#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
#else
	cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
#endif
}
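
/*
 * Snapshot the Empty List Register Status Registers, which flag the list
 * registers that no longer hold a pending or active interrupt.
 */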
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	u32 elrsr0, elrsr1;

	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(nr_lr > 32))
		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
	else
		elrsr1 = 0;

#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
#else
	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
#endif
}
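
/*
 * Save the in-use list registers and clear them in hardware. An LR that
 * ELRSR reports as empty only needs its state bits cleared in the shadow
 * copy, saving an MMIO read.
 */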
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		if (cpu_if->vgic_elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 +
							   (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);

		save_maint_int_state(vcpu, base);
		save_elrsr(vcpu, base);
		save_lrs(vcpu, base);
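
		/* Turn the virtual CPU interface off until the next entry */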
		writel_relaxed(0, base + GICH_HCR);
	} else {
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = ~0UL;
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_apr = 0;
	}
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	int i;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}

/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				     guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address */
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa < vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
		return -1;

	/* Not aligned? Don't bother */
	if (fault_ipa & 3)
		return -1;

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
	addr += fault_ipa - vgic->vgic_cpu_base;
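
	/*
	 * Replay the access through the HYP mapping of the GICV region, at
	 * the same offset as the faulting guest access.
	 */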
	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_data_guest_to_host(vcpu,
						   vcpu_get_reg(vcpu, rd),
						   sizeof(u32));
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
							       sizeof(u32)));
	}

	return 1;
}
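
/*
 * A minimal sketch of how a HYP trap handler might act on the return
 * value above (the handler shape is illustrative, not the actual caller):
 *
 *	switch (__vgic_v2_perform_cpuif_access(vcpu)) {
 *	case 1:			// emulated, skip the faulting instruction
 *		return true;	// resume the guest
 *	case 0:			// not a GICV access, normal MMIO path
 *		return false;	// exit and let the host handle it
 *	case -1:		// illegal access
 *		return false;	// let the host inject a fault
 *	}
 */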