/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

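/*
 * With VHE, the guest's EL1 system registers can stay loaded on the CPU
 * across an exit; the two accessors below pick the live hardware copy
 * (via the *_EL12 aliases) while sysregs_loaded_on_cpu is set, and fall
 * back to the in-memory shadow copy otherwise.
 */
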
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(sctlr_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(cpacr_EL12);
	case TTBR0_EL1:		return read_sysreg_s(ttbr0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(ttbr1_EL12);
	case TCR_EL1:		return read_sysreg_s(tcr_EL12);
	case ESR_EL1:		return read_sysreg_s(esr_EL12);
	case AFSR0_EL1:		return read_sysreg_s(afsr0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(afsr1_EL12);
	case FAR_EL1:		return read_sysreg_s(far_EL12);
	case MAIR_EL1:		return read_sysreg_s(mair_EL12);
	case VBAR_EL1:		return read_sysreg_s(vbar_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(contextidr_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(amair_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(cntkctl_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be
	 * set once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, sctlr_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, cpacr_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, ttbr0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, ttbr1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, tcr_EL12);		return;
	case ESR_EL1:		write_sysreg_s(val, esr_EL12);		return;
	case AFSR0_EL1:		write_sysreg_s(val, afsr0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, afsr1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, far_EL12);		return;
	case MAIR_EL1:		write_sysreg_s(val, mair_EL12);		return;
	case VBAR_EL1:		write_sysreg_s(val, vbar_EL12);		return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, contextidr_EL12);	return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, amair_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, cntkctl_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}
	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */

static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}

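/*
 * The trap_{bvr,bcr,wvr,wcr} handlers below all funnel their accesses
 * through reg_to_dbg()/dbg_to_reg(), so AArch32 guests only ever see
 * (and modify) the low 32 bits of the shadow debug state.
 */
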
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}

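/*
 * Worked example for the packing above: vcpu_id 300 (0x12c) yields
 * Aff0 = 0xc, Aff1 = 0x12, Aff2 = 0x0, i.e. MPIDR_EL1 = 0x8000120c
 * once bit 31 (RES1) is set.
 */
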
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

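/*
 * The helpers above mirror the PMUSERENR_EL0 gating rules: an EL0 access
 * succeeds when the relevant enable bit (EN, SW, CR or ER) is set, and a
 * privileged (EL1) access always succeeds; otherwise we inject an UNDEF
 * and report the access as disabled.
 */
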
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

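/*
 * As an illustration, PMU_PMEVCNTR_EL0(3) expands to a sys_reg_desc for
 * PMEVCNTR3_EL0 whose trap handler is access_pmu_evcntr(), whose reset
 * value is UNKNOWN, and whose shadow state lives at index
 * PMEVCNTR0_EL0 + 3 in the vcpu sys_regs array.
 */
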
static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	u64 now = kvm_phys_timer_read();
	u64 cval;

	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
				      p->regval + now);
	else {
		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		p->regval = cval - now;
	}

	return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);

	return true;
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
			kvm_debug("SVE unsupported for guests, suppressing\n");

		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64ISAR1_EL1) {
		const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) |
					 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
					 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
					 (0xfUL << ID_AA64ISAR1_GPI_SHIFT);
		if (val & ptrauth_mask)
			kvm_debug("ptrauth unsupported for guests, suppressing\n");
		val &= ~ptrauth_mask;
	}

	return val;
}

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, false);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, false);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, true);
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}

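/*
 * For instance, ID_SANITISED(ID_AA64PFR0_EL1) installs access_id_reg() as
 * the guest trap handler and get/set_id_reg() as the userspace accessors
 * for ID_AA64PFR0_EL1; the value itself always comes from the sanitised
 * cpufeature view via read_id_reg().
 */
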
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug, none of the
 * OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_UNALLOCATED(2,7),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_UNALLOCATED(4,4),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, would this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }

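/*
 * Encoding example for the macros above: PMU_PMEVCNTR(10) yields Op1=0,
 * CRn=14, CRm=(0b1000 | (10 >> 3))=9, Op2=(10 & 7)=2, which is the
 * architected cp15 encoding of PMEVCNTR10.
 */
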
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* CNTP_TVAL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
	/* CNTP_CTL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })

static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}

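/*
 * find_reg() relies on bsearch(), so lookups only work because every table
 * above is sorted ascending by (Op0, Op1, CRn, CRm, Op2) -- the same
 * ordering reg_to_match_value() folds into a single scalar key.
 */
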
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}

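/*
 * Note the contract perform_access() relies on: an accessor returns true
 * when the access has been emulated (so the trapped instruction must be
 * skipped), and false when it has injected an exception into the guest
 * (so the PC must stay on the faulting instruction).
 */
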
/**
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
		cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt  = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
			*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	/* Catch someone adding a register without a reset entry. */
	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

2141 static bool index_to_params(u64 id, struct sys_reg_params *params)
2143 switch (id & KVM_REG_SIZE_MASK) {
2144 case KVM_REG_SIZE_U64:
2145 /* Any unused index bits means it's not valid. */
2146 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
2147 | KVM_REG_ARM_COPROC_MASK
2148 | KVM_REG_ARM64_SYSREG_OP0_MASK
2149 | KVM_REG_ARM64_SYSREG_OP1_MASK
2150 | KVM_REG_ARM64_SYSREG_CRN_MASK
2151 | KVM_REG_ARM64_SYSREG_CRM_MASK
2152 | KVM_REG_ARM64_SYSREG_OP2_MASK))
2154 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
2155 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
2156 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
2157 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
2158 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
2159 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
2160 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
2161 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
2162 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
2163 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg_by_id(id, &params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
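/*
 * The userspace-visible contract, sketched as a hypothetical caller (not
 * part of this file): KVM_GET_ONE_REG on an invariant register returns
 * the host's value, and KVM_SET_ONE_REG only succeeds when fed that same
 * value back:
 *
 *	struct kvm_one_reg one_reg = {
 *		.id   = ARM64_SYS_REG(3, 0, 0, 0, 0),	// MIDR_EL1
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);	// val = host MIDR
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);	// 0: value unchanged
 */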
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
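/*
 * Example (illustrative): with a CLIDR-derived cache_levels of 0b100011,
 * i.e. Ctype1 = 3 (separate L1 I/D) and Ctype2 = 4 (unified L2), the
 * valid CSSELR values are 0 (L1 data), 1 (L1 instruction) and 2 (L2
 * unified, InD=0); 3 (L2 with InD=1) is rejected because a unified
 * cache only accepts the data/unified form.
 */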
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}
static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
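/*
 * Worked example (illustrative, assuming the demux encodings from the
 * uapi headers): the index reported for CSSELR value 0, the L1 data
 * cache's CCSIDR, is KVM_REG_ARM64 | KVM_REG_SIZE_U32 |
 * KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 0, which works out
 * to 0x6020000000110000; the low byte selects the cache level and
 * instruction/data bit exactly as CSSELR_EL1 would.
 */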
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
static int walk_one_sys_reg(const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(i1, &uind, &total);
		else
			err = walk_one_sys_reg(i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}
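/*
 * Example of the merge above (illustrative): with a target table of
 * { A, C } and a generic table of { A, B, C, D }, the walk emits the
 * target A (cmp == 0 prefers the target entry and advances both
 * iterators, so the generic duplicate is skipped), then the generic B,
 * the target C and the generic D, i.e. each register exactly once.
 */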
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
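/*
 * Worked example of the CLIDR clean-up (illustrative): with Ctype1 = 3
 * (separate L1 I/D), Ctype2 = 4 (unified L2) and Ctype3 = 0, clidr.val
 * gives cache_levels = 0b100011 = 0x23. The loop stops at i = 2, the
 * mask becomes (1 << 6) - 1 = 0x3f, and cache_levels keeps only the two
 * architecturally defined levels.
 */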
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++) {
		if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
			break;
	}
}