// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */
12 #include <linux/bsearch.h>
13 #include <linux/kvm_host.h>
15 #include <linux/printk.h>
16 #include <linux/uaccess.h>
18 #include <asm/cacheflush.h>
19 #include <asm/cputype.h>
20 #include <asm/debug-monitors.h>
22 #include <asm/kvm_arm.h>
23 #include <asm/kvm_coproc.h>
24 #include <asm/kvm_emulate.h>
25 #include <asm/kvm_hyp.h>
26 #include <asm/kvm_mmu.h>
27 #include <asm/perf_event.h>
28 #include <asm/sysreg.h>
30 #include <trace/events/kvm.h>
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read from write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
79 case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break;
80 case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
81 case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
82 case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
83 case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
84 case TCR_EL1: *val = read_sysreg_s(SYS_TCR_EL12); break;
85 case ESR_EL1: *val = read_sysreg_s(SYS_ESR_EL12); break;
86 case AFSR0_EL1: *val = read_sysreg_s(SYS_AFSR0_EL12); break;
87 case AFSR1_EL1: *val = read_sysreg_s(SYS_AFSR1_EL12); break;
88 case FAR_EL1: *val = read_sysreg_s(SYS_FAR_EL12); break;
89 case MAIR_EL1: *val = read_sysreg_s(SYS_MAIR_EL12); break;
90 case VBAR_EL1: *val = read_sysreg_s(SYS_VBAR_EL12); break;
91 case CONTEXTIDR_EL1: *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
92 case TPIDR_EL0: *val = read_sysreg_s(SYS_TPIDR_EL0); break;
93 case TPIDRRO_EL0: *val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
94 case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break;
95 case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
96 case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
97 case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
98 case PAR_EL1: *val = read_sysreg_s(SYS_PAR_EL1); break;
99 case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
100 case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
101 case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
	default:		return false;
	}

	return true;
}

static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
119 case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); break;
120 case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
121 case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
122 case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
123 case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
124 case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
125 case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
126 case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
127 case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
128 case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
129 case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
130 case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
131 case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
132 case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
133 case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
134 case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
135 case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
136 case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
137 case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
138 case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
139 case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
140 case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
141 case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
	default:		return false;
	}

	return true;
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}
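
/*
 * Usage sketch for the two accessors above (the SCTLR_EL1 manipulation
 * is purely illustrative): while the vcpu is loaded on a VHE host, the
 * EL1 state lives in the CPU and is reached via the *_EL12 aliases;
 * otherwise the in-memory shadow copy is used.
 *
 *	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
 *	vcpu_write_sys_reg(vcpu, sctlr | SCTLR_ELx_C, SCTLR_EL1);
 */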
168 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
169 static u32 cache_levels;
171 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
172 #define CSSELR_MAX 12
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
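
/*
 * CSSELR_EL1 encodes the cache selector as { Level[3:1], InD[0] }, so,
 * for example, get_ccsidr(0) returns the geometry of the L1 data or
 * unified cache, and get_ccsidr(1) that of the L1 instruction cache.
 */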
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);
	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
221 bool was_enabled = vcpu_has_cache_enabled(vcpu);
225 BUG_ON(!p->is_write);
227 /* See the 32bit mapping in kvm_host.h */
231 if (!p->is_aarch32 || !p->is_32bit) {
234 val = vcpu_read_sys_reg(vcpu, reg);
236 val = (p->regval << 32) | (u64)lower_32_bits(val);
238 val = ((u64)upper_32_bits(val) << 32) |
239 lower_32_bits(p->regval);
241 vcpu_write_sys_reg(vcpu, val, reg);
243 kvm_toggle_cache(vcpu, was_enabled);
247 static bool access_actlr(struct kvm_vcpu *vcpu,
248 struct sys_reg_params *p,
249 const struct sys_reg_desc *r)
252 return ignore_write(vcpu, p);
254 p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
258 p->regval = upper_32_bits(p->regval);
260 p->regval = lower_32_bits(p->regval);
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);
	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
290 default: /* Keep GCC quiet */
291 case 0: /* ICC_SGI1R */
294 case 1: /* ICC_ASGI1R */
295 case 2: /* ICC_SGI0R */
301 default: /* Keep GCC quiet */
302 case 5: /* ICC_SGI1R_EL1 */
305 case 6: /* ICC_ASGI1R_EL1 */
306 case 7: /* ICC_SGI0R_EL1 */
312 vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
317 static bool access_gic_sre(struct kvm_vcpu *vcpu,
318 struct sys_reg_params *p,
319 const struct sys_reg_desc *r)
322 return ignore_write(vcpu, p);
324 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being an RO register, we
 * treat it separately.
 */
344 static bool trap_loregion(struct kvm_vcpu *vcpu,
345 struct sys_reg_params *p,
346 const struct sys_reg_desc *r)
348 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
349 u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
350 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
352 if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
353 kvm_inject_undefined(vcpu);
357 if (p->is_write && sr == SYS_LORID_EL1)
358 return write_to_read_only(vcpu, p, r);
360 return trap_raz_wi(vcpu, p, r);
363 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
364 struct sys_reg_params *p,
365 const struct sys_reg_desc *r)
368 return ignore_write(vcpu, p);
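	/*
	 * Bit 3 is OSLSR_EL1.OSLM[1]: report the OS Lock as implemented,
	 * with OSLK clear (i.e. not locked).
	 */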
370 p->regval = (1 << 3);
375 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
376 struct sys_reg_params *p,
377 const struct sys_reg_desc *r)
380 return ignore_write(vcpu, p);
382 p->regval = read_sysreg(dbgauthstatus_el1);
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
414 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
415 struct sys_reg_params *p,
416 const struct sys_reg_desc *r)
419 vcpu_write_sys_reg(vcpu, p->regval, r->reg);
420 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
422 p->regval = vcpu_read_sys_reg(vcpu, r->reg);
425 trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
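
/*
 * Worked example (values purely illustrative): with a 64-bit shadow of
 * 0xffff00001234cafe, a 32-bit guest write of 0xabcd0000 leaves
 * 0xffff0000abcd0000 in the shadow, while a 32-bit read returns
 * 0x1234cafe.
 */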
439 static void reg_to_dbg(struct kvm_vcpu *vcpu,
440 struct sys_reg_params *p,
447 val |= ((*dbg_reg >> 32) << 32);
451 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
454 static void dbg_to_reg(struct kvm_vcpu *vcpu,
455 struct sys_reg_params *p,
458 p->regval = *dbg_reg;
460 p->regval &= 0xffffffffUL;
463 static bool trap_bvr(struct kvm_vcpu *vcpu,
464 struct sys_reg_params *p,
465 const struct sys_reg_desc *rd)
467 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
470 reg_to_dbg(vcpu, p, dbg_reg);
472 dbg_to_reg(vcpu, p, dbg_reg);
474 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
479 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
480 const struct kvm_one_reg *reg, void __user *uaddr)
482 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
484 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
489 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
490 const struct kvm_one_reg *reg, void __user *uaddr)
492 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
494 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
499 static void reset_bvr(struct kvm_vcpu *vcpu,
500 const struct sys_reg_desc *rd)
502 vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
505 static bool trap_bcr(struct kvm_vcpu *vcpu,
506 struct sys_reg_params *p,
507 const struct sys_reg_desc *rd)
509 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
512 reg_to_dbg(vcpu, p, dbg_reg);
514 dbg_to_reg(vcpu, p, dbg_reg);
516 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
521 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
522 const struct kvm_one_reg *reg, void __user *uaddr)
524 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
526 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
532 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
533 const struct kvm_one_reg *reg, void __user *uaddr)
535 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
537 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
542 static void reset_bcr(struct kvm_vcpu *vcpu,
543 const struct sys_reg_desc *rd)
545 vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
548 static bool trap_wvr(struct kvm_vcpu *vcpu,
549 struct sys_reg_params *p,
550 const struct sys_reg_desc *rd)
552 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
555 reg_to_dbg(vcpu, p, dbg_reg);
557 dbg_to_reg(vcpu, p, dbg_reg);
559 trace_trap_reg(__func__, rd->reg, p->is_write,
560 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
565 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
566 const struct kvm_one_reg *reg, void __user *uaddr)
568 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
570 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
575 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
576 const struct kvm_one_reg *reg, void __user *uaddr)
578 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
580 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
585 static void reset_wvr(struct kvm_vcpu *vcpu,
586 const struct sys_reg_desc *rd)
588 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
591 static bool trap_wcr(struct kvm_vcpu *vcpu,
592 struct sys_reg_params *p,
593 const struct sys_reg_desc *rd)
595 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
598 reg_to_dbg(vcpu, p, dbg_reg);
600 dbg_to_reg(vcpu, p, dbg_reg);
602 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
607 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
608 const struct kvm_one_reg *reg, void __user *uaddr)
610 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
612 if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
617 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
618 const struct kvm_one_reg *reg, void __user *uaddr)
620 __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
622 if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
627 static void reset_wcr(struct kvm_vcpu *vcpu,
628 const struct sys_reg_desc *rd)
630 vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
633 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
635 u64 amair = read_sysreg(amair_el1);
636 vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
639 static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
641 u64 actlr = read_sysreg(actlr_el1);
642 vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
656 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
657 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
658 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
659 vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
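
	/*
	 * Worked example (illustrative): vcpu_id 17 yields Aff0 = 17 & 0xf = 1
	 * and Aff1 = (17 >> 4) & 0xff = 1, so the guest sees
	 * MPIDR_EL1 = 0x80000101 (bit 31 being RES1).
	 */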
662 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
666 pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except for PMCR.E, which resets to zero.
	 */
671 val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
672 | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
673 if (!system_supports_32bit_el0())
674 val |= ARMV8_PMU_PMCR_LC;
675 __vcpu_sys_reg(vcpu, r->reg) = val;
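
/*
 * Note on the reset value above: 0xdecafbad is nothing more than an
 * arbitrary poison pattern. ANDing it with ARMV8_PMU_PMCR_MASK produces
 * an UNKNOWN value for the writable bits, while the read-only bits
 * (such as PMCR_EL0.N) keep the values read from the hardware pmcr_el0.
 */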
678 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
680 u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
681 bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
684 kvm_inject_undefined(vcpu);
689 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
691 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
694 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
696 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
699 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
701 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
704 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
706 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
709 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
710 const struct sys_reg_desc *r)
714 if (!kvm_arm_pmu_v3_ready(vcpu))
715 return trap_raz_wi(vcpu, p, r);
717 if (pmu_access_el0_disabled(vcpu))
		/* Only update writable bits of PMCR */
722 val = __vcpu_sys_reg(vcpu, PMCR_EL0);
723 val &= ~ARMV8_PMU_PMCR_MASK;
724 val |= p->regval & ARMV8_PMU_PMCR_MASK;
725 if (!system_supports_32bit_el0())
726 val |= ARMV8_PMU_PMCR_LC;
727 __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
728 kvm_pmu_handle_pmcr(vcpu, val);
729 kvm_vcpu_pmu_restore_guest(vcpu);
731 /* PMCR.P & PMCR.C are RAZ */
732 val = __vcpu_sys_reg(vcpu, PMCR_EL0)
733 & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
740 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
741 const struct sys_reg_desc *r)
743 if (!kvm_arm_pmu_v3_ready(vcpu))
744 return trap_raz_wi(vcpu, p, r);
746 if (pmu_access_event_counter_el0_disabled(vcpu))
750 __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
752 /* return PMSELR.SEL field */
753 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
754 & ARMV8_PMU_COUNTER_MASK;
759 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
760 const struct sys_reg_desc *r)
764 if (!kvm_arm_pmu_v3_ready(vcpu))
765 return trap_raz_wi(vcpu, p, r);
769 if (pmu_access_el0_disabled(vcpu))
773 pmceid = read_sysreg(pmceid0_el0);
775 pmceid = read_sysreg(pmceid1_el0);
782 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
786 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
787 val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
788 if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
789 kvm_inject_undefined(vcpu);
796 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
797 struct sys_reg_params *p,
798 const struct sys_reg_desc *r)
802 if (!kvm_arm_pmu_v3_ready(vcpu))
803 return trap_raz_wi(vcpu, p, r);
805 if (r->CRn == 9 && r->CRm == 13) {
808 if (pmu_access_event_counter_el0_disabled(vcpu))
811 idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
812 & ARMV8_PMU_COUNTER_MASK;
813 } else if (r->Op2 == 0) {
815 if (pmu_access_cycle_counter_el0_disabled(vcpu))
818 idx = ARMV8_PMU_CYCLE_IDX;
822 } else if (r->CRn == 0 && r->CRm == 9) {
824 if (pmu_access_event_counter_el0_disabled(vcpu))
827 idx = ARMV8_PMU_CYCLE_IDX;
828 } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
830 if (pmu_access_event_counter_el0_disabled(vcpu))
833 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
838 if (!pmu_counter_idx_valid(vcpu, idx))
842 if (pmu_access_el0_disabled(vcpu))
845 kvm_pmu_set_counter_value(vcpu, idx, p->regval);
847 p->regval = kvm_pmu_get_counter_value(vcpu, idx);
853 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
854 const struct sys_reg_desc *r)
858 if (!kvm_arm_pmu_v3_ready(vcpu))
859 return trap_raz_wi(vcpu, p, r);
861 if (pmu_access_el0_disabled(vcpu))
864 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
866 idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
867 reg = PMEVTYPER0_EL0 + idx;
868 } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
869 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
870 if (idx == ARMV8_PMU_CYCLE_IDX)
874 reg = PMEVTYPER0_EL0 + idx;
879 if (!pmu_counter_idx_valid(vcpu, idx))
883 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
884 __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
885 kvm_vcpu_pmu_restore_guest(vcpu);
887 p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
893 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
894 const struct sys_reg_desc *r)
898 if (!kvm_arm_pmu_v3_ready(vcpu))
899 return trap_raz_wi(vcpu, p, r);
901 if (pmu_access_el0_disabled(vcpu))
904 mask = kvm_pmu_valid_counter_mask(vcpu);
906 val = p->regval & mask;
908 /* accessing PMCNTENSET_EL0 */
909 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
910 kvm_pmu_enable_counter_mask(vcpu, val);
911 kvm_vcpu_pmu_restore_guest(vcpu);
913 /* accessing PMCNTENCLR_EL0 */
914 __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
915 kvm_pmu_disable_counter_mask(vcpu, val);
918 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
924 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
925 const struct sys_reg_desc *r)
927 u64 mask = kvm_pmu_valid_counter_mask(vcpu);
929 if (!kvm_arm_pmu_v3_ready(vcpu))
930 return trap_raz_wi(vcpu, p, r);
932 if (!vcpu_mode_priv(vcpu)) {
933 kvm_inject_undefined(vcpu);
938 u64 val = p->regval & mask;
941 /* accessing PMINTENSET_EL1 */
942 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
944 /* accessing PMINTENCLR_EL1 */
945 __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
947 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
953 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
954 const struct sys_reg_desc *r)
956 u64 mask = kvm_pmu_valid_counter_mask(vcpu);
958 if (!kvm_arm_pmu_v3_ready(vcpu))
959 return trap_raz_wi(vcpu, p, r);
961 if (pmu_access_el0_disabled(vcpu))
966 /* accessing PMOVSSET_EL0 */
967 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
969 /* accessing PMOVSCLR_EL0 */
970 __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
972 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
978 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
979 const struct sys_reg_desc *r)
983 if (!kvm_arm_pmu_v3_ready(vcpu))
984 return trap_raz_wi(vcpu, p, r);
987 return read_from_write_only(vcpu, p, r);
989 if (pmu_write_swinc_el0_disabled(vcpu))
992 mask = kvm_pmu_valid_counter_mask(vcpu);
993 kvm_pmu_software_increment(vcpu, p->regval & mask);
997 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
998 const struct sys_reg_desc *r)
1000 if (!kvm_arm_pmu_v3_ready(vcpu))
1001 return trap_raz_wi(vcpu, p, r);
1004 if (!vcpu_mode_priv(vcpu)) {
1005 kvm_inject_undefined(vcpu);
1009 __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
1010 p->regval & ARMV8_PMU_USERENR_MASK;
1012 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1013 & ARMV8_PMU_USERENR_MASK;
#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
1023 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
1024 #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
1025 { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
1026 trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
1027 { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
1028 trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
1029 { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
1030 trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
1031 { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
1032 trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
1034 /* Macro to expand the PMEVCNTRn_EL0 register */
1035 #define PMU_PMEVCNTR_EL0(n) \
1036 { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
1037 access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
1039 /* Macro to expand the PMEVTYPERn_EL0 register */
1040 #define PMU_PMEVTYPER_EL0(n) \
1041 { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
1042 access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
1044 static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1045 const struct sys_reg_desc *r)
1047 kvm_inject_undefined(vcpu);
/* Macro to expand the AMU counter and type registers */
1053 #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu }
1054 #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), access_amu }
1055 #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu }
1056 #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), access_amu }
1058 static bool trap_ptrauth(struct kvm_vcpu *vcpu,
1059 struct sys_reg_params *p,
1060 const struct sys_reg_desc *rd)
	/*
	 * If we land here, that is because we didn't fix up the access on exit
	 * by allowing the PtrAuth sysregs. The only way this happens is when
	 * the guest does not have PtrAuth support enabled.
	 */
1067 kvm_inject_undefined(vcpu);
1072 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1073 const struct sys_reg_desc *rd)
1075 return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1078 #define __PTRAUTH_KEY(k) \
1079 { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k, \
1080 .visibility = ptrauth_visibility}
1082 #define PTRAUTH_KEY(k) \
1083 __PTRAUTH_KEY(k ## KEYLO_EL1), \
1084 __PTRAUTH_KEY(k ## KEYHI_EL1)
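
/*
 * As an example of the expansion, PTRAUTH_KEY(APIA) emits the
 * descriptors for APIAKEYLO_EL1 and APIAKEYHI_EL1.
 */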
1086 static bool access_arch_timer(struct kvm_vcpu *vcpu,
1087 struct sys_reg_params *p,
1088 const struct sys_reg_desc *r)
1090 enum kvm_arch_timers tmr;
1091 enum kvm_arch_timer_regs treg;
1092 u64 reg = reg_to_encoding(r);
1095 case SYS_CNTP_TVAL_EL0:
1096 case SYS_AARCH32_CNTP_TVAL:
1098 treg = TIMER_REG_TVAL;
1100 case SYS_CNTP_CTL_EL0:
1101 case SYS_AARCH32_CNTP_CTL:
1103 treg = TIMER_REG_CTL;
1105 case SYS_CNTP_CVAL_EL0:
1106 case SYS_AARCH32_CNTP_CVAL:
1108 treg = TIMER_REG_CVAL;
1115 kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1117 p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1122 /* Read a sanitised cpufeature ID register by sys_reg_desc */
1123 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
1124 struct sys_reg_desc const *r, bool raz)
1126 u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
1127 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
1128 u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
1130 if (id == SYS_ID_AA64PFR0_EL1) {
1131 if (!vcpu_has_sve(vcpu))
1132 val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
1133 val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
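		/*
		 * Advertise CSV2 when the host is known to be unaffected by
		 * Spectre-v2, even if the CPU itself does not report it.
		 */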
1134 if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
1135 arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
1136 val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
1137 } else if (id == SYS_ID_AA64PFR1_EL1) {
1138 val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
1139 } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
1140 val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
1141 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
1142 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
1143 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
1144 } else if (id == SYS_ID_AA64DFR0_EL1) {
1145 /* Limit guests to PMUv3 for ARMv8.1 */
1146 val = cpuid_feature_cap_perfmon_field(val,
1147 ID_AA64DFR0_PMUVER_SHIFT,
1148 ID_AA64DFR0_PMUVER_8_1);
1149 } else if (id == SYS_ID_DFR0_EL1) {
1150 /* Limit guests to PMUv3 for ARMv8.1 */
1151 val = cpuid_feature_cap_perfmon_field(val,
1152 ID_DFR0_PERFMON_SHIFT,
1153 ID_DFR0_PERFMON_8_1);
1159 /* cpufeature ID register access trap handlers */
1161 static bool __access_id_reg(struct kvm_vcpu *vcpu,
1162 struct sys_reg_params *p,
1163 const struct sys_reg_desc *r,
1167 return write_to_read_only(vcpu, p, r);
1169 p->regval = read_id_reg(vcpu, r, raz);
1173 static bool access_id_reg(struct kvm_vcpu *vcpu,
1174 struct sys_reg_params *p,
1175 const struct sys_reg_desc *r)
1177 return __access_id_reg(vcpu, p, r, false);
1180 static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
1181 struct sys_reg_params *p,
1182 const struct sys_reg_desc *r)
1184 return __access_id_reg(vcpu, p, r, true);
1187 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
1188 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
1189 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
1191 /* Visibility overrides for SVE-specific control registers */
1192 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1193 const struct sys_reg_desc *rd)
1195 if (vcpu_has_sve(vcpu))
1198 return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
1201 /* Visibility overrides for SVE-specific ID registers */
1202 static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
1203 const struct sys_reg_desc *rd)
1205 if (vcpu_has_sve(vcpu))
1208 return REG_HIDDEN_USER;
1211 /* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
1212 static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
1214 if (!vcpu_has_sve(vcpu))
1217 return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
1220 static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1221 struct sys_reg_params *p,
1222 const struct sys_reg_desc *rd)
1225 return write_to_read_only(vcpu, p, rd);
1227 p->regval = guest_id_aa64zfr0_el1(vcpu);
1231 static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1232 const struct sys_reg_desc *rd,
1233 const struct kvm_one_reg *reg, void __user *uaddr)
1237 if (WARN_ON(!vcpu_has_sve(vcpu)))
1240 val = guest_id_aa64zfr0_el1(vcpu);
1241 return reg_to_user(uaddr, &val, reg->id);
1244 static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
1245 const struct sys_reg_desc *rd,
1246 const struct kvm_one_reg *reg, void __user *uaddr)
1248 const u64 id = sys_reg_to_index(rd);
1252 if (WARN_ON(!vcpu_has_sve(vcpu)))
1255 err = reg_from_user(&val, uaddr, id);
1259 /* This is what we mean by invariant: you can't change it. */
1260 if (val != guest_id_aa64zfr0_el1(vcpu))
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
1273 static int __get_id_reg(const struct kvm_vcpu *vcpu,
1274 const struct sys_reg_desc *rd, void __user *uaddr,
1277 const u64 id = sys_reg_to_index(rd);
1278 const u64 val = read_id_reg(vcpu, rd, raz);
1280 return reg_to_user(uaddr, &val, id);
1283 static int __set_id_reg(const struct kvm_vcpu *vcpu,
1284 const struct sys_reg_desc *rd, void __user *uaddr,
1287 const u64 id = sys_reg_to_index(rd);
1291 err = reg_from_user(&val, uaddr, id);
1295 /* This is what we mean by invariant: you can't change it. */
1296 if (val != read_id_reg(vcpu, rd, raz))
1302 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1303 const struct kvm_one_reg *reg, void __user *uaddr)
1305 return __get_id_reg(vcpu, rd, uaddr, false);
1308 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1309 const struct kvm_one_reg *reg, void __user *uaddr)
1311 return __set_id_reg(vcpu, rd, uaddr, false);
1314 static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1315 const struct kvm_one_reg *reg, void __user *uaddr)
1317 return __get_id_reg(vcpu, rd, uaddr, true);
1320 static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1321 const struct kvm_one_reg *reg, void __user *uaddr)
1323 return __set_id_reg(vcpu, rd, uaddr, true);
1326 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1327 const struct sys_reg_desc *r)
1330 return write_to_read_only(vcpu, p, r);
1332 p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
1336 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1337 const struct sys_reg_desc *r)
1340 return write_to_read_only(vcpu, p, r);
1342 p->regval = read_sysreg(clidr_el1);
1346 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1347 const struct sys_reg_desc *r)
1351 /* See the 32bit mapping in kvm_host.h */
1356 vcpu_write_sys_reg(vcpu, p->regval, reg);
1358 p->regval = vcpu_read_sys_reg(vcpu, reg);
1362 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1363 const struct sys_reg_desc *r)
1368 return write_to_read_only(vcpu, p, r);
1370 csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
1371 p->regval = get_ccsidr(csselr);
	/*
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the
	 * appropriate time.
	 * To prevent this trapping from causing performance problems, let's
	 * expose the geometry of all data and unified caches (which are
	 * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 */
1385 if (!(csselr & 1)) // data or unified cache
1386 p->regval &= ~GENMASK(27, 3);
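	/*
	 * CCSIDR_EL1 bits [27:3] hold NumSets[27:13] and Associativity[12:3],
	 * both encoded as (value - 1), so clearing them reports exactly
	 * 1 set and 1 way.
	 */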
1390 static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1391 const struct sys_reg_desc *r)
1393 kvm_inject_undefined(vcpu);
1397 /* sys_reg_desc initialiser for known cpufeature ID registers */
1398 #define ID_SANITISED(name) { \
1399 SYS_DESC(SYS_##name), \
1400 .access = access_id_reg, \
1401 .get_user = get_id_reg, \
1402 .set_user = set_id_reg, \
/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
1410 #define ID_UNALLOCATED(crm, op2) { \
1411 Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
1412 .access = access_raz_id_reg, \
1413 .get_user = get_raz_id_reg, \
1414 .set_user = set_raz_id_reg, \
/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
1422 #define ID_HIDDEN(name) { \
1423 SYS_DESC(SYS_##name), \
1424 .access = access_raz_id_reg, \
1425 .get_user = get_raz_id_reg, \
1426 .set_user = set_raz_id_reg, \
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all, debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
1440 static const struct sys_reg_desc sys_reg_descs[] = {
1441 { SYS_DESC(SYS_DC_ISW), access_dcsw },
1442 { SYS_DESC(SYS_DC_CSW), access_dcsw },
1443 { SYS_DESC(SYS_DC_CISW), access_dcsw },
1445 DBG_BCR_BVR_WCR_WVR_EL1(0),
1446 DBG_BCR_BVR_WCR_WVR_EL1(1),
1447 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1448 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
1449 DBG_BCR_BVR_WCR_WVR_EL1(2),
1450 DBG_BCR_BVR_WCR_WVR_EL1(3),
1451 DBG_BCR_BVR_WCR_WVR_EL1(4),
1452 DBG_BCR_BVR_WCR_WVR_EL1(5),
1453 DBG_BCR_BVR_WCR_WVR_EL1(6),
1454 DBG_BCR_BVR_WCR_WVR_EL1(7),
1455 DBG_BCR_BVR_WCR_WVR_EL1(8),
1456 DBG_BCR_BVR_WCR_WVR_EL1(9),
1457 DBG_BCR_BVR_WCR_WVR_EL1(10),
1458 DBG_BCR_BVR_WCR_WVR_EL1(11),
1459 DBG_BCR_BVR_WCR_WVR_EL1(12),
1460 DBG_BCR_BVR_WCR_WVR_EL1(13),
1461 DBG_BCR_BVR_WCR_WVR_EL1(14),
1462 DBG_BCR_BVR_WCR_WVR_EL1(15),
1464 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
1465 { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
1466 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
1467 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
1468 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
1469 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
1470 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
1471 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
1473 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
1474 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
1475 // DBGDTR[TR]X_EL0 share the same encoding
1476 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
1478 { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
1480 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */
1487 /* AArch64 mappings of the AArch32 ID registers */
1489 ID_SANITISED(ID_PFR0_EL1),
1490 ID_SANITISED(ID_PFR1_EL1),
1491 ID_SANITISED(ID_DFR0_EL1),
1492 ID_HIDDEN(ID_AFR0_EL1),
1493 ID_SANITISED(ID_MMFR0_EL1),
1494 ID_SANITISED(ID_MMFR1_EL1),
1495 ID_SANITISED(ID_MMFR2_EL1),
1496 ID_SANITISED(ID_MMFR3_EL1),
1499 ID_SANITISED(ID_ISAR0_EL1),
1500 ID_SANITISED(ID_ISAR1_EL1),
1501 ID_SANITISED(ID_ISAR2_EL1),
1502 ID_SANITISED(ID_ISAR3_EL1),
1503 ID_SANITISED(ID_ISAR4_EL1),
1504 ID_SANITISED(ID_ISAR5_EL1),
1505 ID_SANITISED(ID_MMFR4_EL1),
1506 ID_SANITISED(ID_ISAR6_EL1),
1509 ID_SANITISED(MVFR0_EL1),
1510 ID_SANITISED(MVFR1_EL1),
1511 ID_SANITISED(MVFR2_EL1),
1512 ID_UNALLOCATED(3,3),
1513 ID_SANITISED(ID_PFR2_EL1),
1514 ID_HIDDEN(ID_DFR1_EL1),
1515 ID_SANITISED(ID_MMFR5_EL1),
1516 ID_UNALLOCATED(3,7),
1518 /* AArch64 ID registers */
1520 ID_SANITISED(ID_AA64PFR0_EL1),
1521 ID_SANITISED(ID_AA64PFR1_EL1),
1522 ID_UNALLOCATED(4,2),
1523 ID_UNALLOCATED(4,3),
1524 { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
1525 ID_UNALLOCATED(4,5),
1526 ID_UNALLOCATED(4,6),
1527 ID_UNALLOCATED(4,7),
1530 ID_SANITISED(ID_AA64DFR0_EL1),
1531 ID_SANITISED(ID_AA64DFR1_EL1),
1532 ID_UNALLOCATED(5,2),
1533 ID_UNALLOCATED(5,3),
1534 ID_HIDDEN(ID_AA64AFR0_EL1),
1535 ID_HIDDEN(ID_AA64AFR1_EL1),
1536 ID_UNALLOCATED(5,6),
1537 ID_UNALLOCATED(5,7),
1540 ID_SANITISED(ID_AA64ISAR0_EL1),
1541 ID_SANITISED(ID_AA64ISAR1_EL1),
1542 ID_UNALLOCATED(6,2),
1543 ID_UNALLOCATED(6,3),
1544 ID_UNALLOCATED(6,4),
1545 ID_UNALLOCATED(6,5),
1546 ID_UNALLOCATED(6,6),
1547 ID_UNALLOCATED(6,7),
1550 ID_SANITISED(ID_AA64MMFR0_EL1),
1551 ID_SANITISED(ID_AA64MMFR1_EL1),
1552 ID_SANITISED(ID_AA64MMFR2_EL1),
1553 ID_UNALLOCATED(7,3),
1554 ID_UNALLOCATED(7,4),
1555 ID_UNALLOCATED(7,5),
1556 ID_UNALLOCATED(7,6),
1557 ID_UNALLOCATED(7,7),
1559 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
1560 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
1561 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
1563 { SYS_DESC(SYS_RGSR_EL1), access_mte_regs },
1564 { SYS_DESC(SYS_GCR_EL1), access_mte_regs },
1566 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
1567 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
1568 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
1569 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
1577 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
1578 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
1579 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
1581 { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
1582 { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
1583 { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
1584 { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
1585 { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
1586 { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
1587 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
1588 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
1590 { SYS_DESC(SYS_TFSR_EL1), access_mte_regs },
1591 { SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs },
1593 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
1594 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
1596 { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
1597 { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
1599 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1600 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
1602 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
1603 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
1604 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
1605 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
1606 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
1608 { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
1609 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1611 { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1612 { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
1613 { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1614 { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
1615 { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1616 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
1617 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
1618 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
1619 { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1620 { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
1621 { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1622 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1624 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1625 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
1627 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
1629 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
1630 { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
1631 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
1632 { SYS_DESC(SYS_CTR_EL0), access_ctr },
1634 { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
1635 { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
1636 { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
1637 { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
1638 { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
1639 { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
1640 { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
1641 { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
1642 { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
1643 { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
1644 { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
1649 { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
1650 { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
1652 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
1653 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
1655 { SYS_DESC(SYS_AMCR_EL0), access_amu },
1656 { SYS_DESC(SYS_AMCFGR_EL0), access_amu },
1657 { SYS_DESC(SYS_AMCGCR_EL0), access_amu },
1658 { SYS_DESC(SYS_AMUSERENR_EL0), access_amu },
1659 { SYS_DESC(SYS_AMCNTENCLR0_EL0), access_amu },
1660 { SYS_DESC(SYS_AMCNTENSET0_EL0), access_amu },
1661 { SYS_DESC(SYS_AMCNTENCLR1_EL0), access_amu },
1662 { SYS_DESC(SYS_AMCNTENSET1_EL0), access_amu },
1663 AMU_AMEVCNTR0_EL0(0),
1664 AMU_AMEVCNTR0_EL0(1),
1665 AMU_AMEVCNTR0_EL0(2),
1666 AMU_AMEVCNTR0_EL0(3),
1667 AMU_AMEVCNTR0_EL0(4),
1668 AMU_AMEVCNTR0_EL0(5),
1669 AMU_AMEVCNTR0_EL0(6),
1670 AMU_AMEVCNTR0_EL0(7),
1671 AMU_AMEVCNTR0_EL0(8),
1672 AMU_AMEVCNTR0_EL0(9),
1673 AMU_AMEVCNTR0_EL0(10),
1674 AMU_AMEVCNTR0_EL0(11),
1675 AMU_AMEVCNTR0_EL0(12),
1676 AMU_AMEVCNTR0_EL0(13),
1677 AMU_AMEVCNTR0_EL0(14),
1678 AMU_AMEVCNTR0_EL0(15),
1679 AMU_AMEVTYPER0_EL0(0),
1680 AMU_AMEVTYPER0_EL0(1),
1681 AMU_AMEVTYPER0_EL0(2),
1682 AMU_AMEVTYPER0_EL0(3),
1683 AMU_AMEVTYPER0_EL0(4),
1684 AMU_AMEVTYPER0_EL0(5),
1685 AMU_AMEVTYPER0_EL0(6),
1686 AMU_AMEVTYPER0_EL0(7),
1687 AMU_AMEVTYPER0_EL0(8),
1688 AMU_AMEVTYPER0_EL0(9),
1689 AMU_AMEVTYPER0_EL0(10),
1690 AMU_AMEVTYPER0_EL0(11),
1691 AMU_AMEVTYPER0_EL0(12),
1692 AMU_AMEVTYPER0_EL0(13),
1693 AMU_AMEVTYPER0_EL0(14),
1694 AMU_AMEVTYPER0_EL0(15),
1695 AMU_AMEVCNTR1_EL0(0),
1696 AMU_AMEVCNTR1_EL0(1),
1697 AMU_AMEVCNTR1_EL0(2),
1698 AMU_AMEVCNTR1_EL0(3),
1699 AMU_AMEVCNTR1_EL0(4),
1700 AMU_AMEVCNTR1_EL0(5),
1701 AMU_AMEVCNTR1_EL0(6),
1702 AMU_AMEVCNTR1_EL0(7),
1703 AMU_AMEVCNTR1_EL0(8),
1704 AMU_AMEVCNTR1_EL0(9),
1705 AMU_AMEVCNTR1_EL0(10),
1706 AMU_AMEVCNTR1_EL0(11),
1707 AMU_AMEVCNTR1_EL0(12),
1708 AMU_AMEVCNTR1_EL0(13),
1709 AMU_AMEVCNTR1_EL0(14),
1710 AMU_AMEVCNTR1_EL0(15),
1711 AMU_AMEVTYPER1_EL0(0),
1712 AMU_AMEVTYPER1_EL0(1),
1713 AMU_AMEVTYPER1_EL0(2),
1714 AMU_AMEVTYPER1_EL0(3),
1715 AMU_AMEVTYPER1_EL0(4),
1716 AMU_AMEVTYPER1_EL0(5),
1717 AMU_AMEVTYPER1_EL0(6),
1718 AMU_AMEVTYPER1_EL0(7),
1719 AMU_AMEVTYPER1_EL0(8),
1720 AMU_AMEVTYPER1_EL0(9),
1721 AMU_AMEVTYPER1_EL0(10),
1722 AMU_AMEVTYPER1_EL0(11),
1723 AMU_AMEVTYPER1_EL0(12),
1724 AMU_AMEVTYPER1_EL0(13),
1725 AMU_AMEVTYPER1_EL0(14),
1726 AMU_AMEVTYPER1_EL0(15),
1728 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
1729 { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
1730 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
1733 PMU_PMEVCNTR_EL0(0),
1734 PMU_PMEVCNTR_EL0(1),
1735 PMU_PMEVCNTR_EL0(2),
1736 PMU_PMEVCNTR_EL0(3),
1737 PMU_PMEVCNTR_EL0(4),
1738 PMU_PMEVCNTR_EL0(5),
1739 PMU_PMEVCNTR_EL0(6),
1740 PMU_PMEVCNTR_EL0(7),
1741 PMU_PMEVCNTR_EL0(8),
1742 PMU_PMEVCNTR_EL0(9),
1743 PMU_PMEVCNTR_EL0(10),
1744 PMU_PMEVCNTR_EL0(11),
1745 PMU_PMEVCNTR_EL0(12),
1746 PMU_PMEVCNTR_EL0(13),
1747 PMU_PMEVCNTR_EL0(14),
1748 PMU_PMEVCNTR_EL0(15),
1749 PMU_PMEVCNTR_EL0(16),
1750 PMU_PMEVCNTR_EL0(17),
1751 PMU_PMEVCNTR_EL0(18),
1752 PMU_PMEVCNTR_EL0(19),
1753 PMU_PMEVCNTR_EL0(20),
1754 PMU_PMEVCNTR_EL0(21),
1755 PMU_PMEVCNTR_EL0(22),
1756 PMU_PMEVCNTR_EL0(23),
1757 PMU_PMEVCNTR_EL0(24),
1758 PMU_PMEVCNTR_EL0(25),
1759 PMU_PMEVCNTR_EL0(26),
1760 PMU_PMEVCNTR_EL0(27),
1761 PMU_PMEVCNTR_EL0(28),
1762 PMU_PMEVCNTR_EL0(29),
1763 PMU_PMEVCNTR_EL0(30),
1764 /* PMEVTYPERn_EL0 */
1765 PMU_PMEVTYPER_EL0(0),
1766 PMU_PMEVTYPER_EL0(1),
1767 PMU_PMEVTYPER_EL0(2),
1768 PMU_PMEVTYPER_EL0(3),
1769 PMU_PMEVTYPER_EL0(4),
1770 PMU_PMEVTYPER_EL0(5),
1771 PMU_PMEVTYPER_EL0(6),
1772 PMU_PMEVTYPER_EL0(7),
1773 PMU_PMEVTYPER_EL0(8),
1774 PMU_PMEVTYPER_EL0(9),
1775 PMU_PMEVTYPER_EL0(10),
1776 PMU_PMEVTYPER_EL0(11),
1777 PMU_PMEVTYPER_EL0(12),
1778 PMU_PMEVTYPER_EL0(13),
1779 PMU_PMEVTYPER_EL0(14),
1780 PMU_PMEVTYPER_EL0(15),
1781 PMU_PMEVTYPER_EL0(16),
1782 PMU_PMEVTYPER_EL0(17),
1783 PMU_PMEVTYPER_EL0(18),
1784 PMU_PMEVTYPER_EL0(19),
1785 PMU_PMEVTYPER_EL0(20),
1786 PMU_PMEVTYPER_EL0(21),
1787 PMU_PMEVTYPER_EL0(22),
1788 PMU_PMEVTYPER_EL0(23),
1789 PMU_PMEVTYPER_EL0(24),
1790 PMU_PMEVTYPER_EL0(25),
1791 PMU_PMEVTYPER_EL0(26),
1792 PMU_PMEVTYPER_EL0(27),
1793 PMU_PMEVTYPER_EL0(28),
1794 PMU_PMEVTYPER_EL0(29),
1795 PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
1800 { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
1802 { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1803 { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1804 { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
1807 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1808 struct sys_reg_params *p,
1809 const struct sys_reg_desc *r)
1812 return ignore_write(vcpu, p);
1814 u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1815 u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1816 u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
1818 p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
1819 (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
1820 (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
1821 | (6 << 16) | (el3 << 14) | (el3 << 12));
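
	/*
	 * DBGDIDR breakdown of the value composed above: WRPs[31:28],
	 * BRPs[27:24], CTX_CMPs[23:20], Version[19:16] = 6 (ARMv8 debug
	 * architecture), with nSUHD_imp[14] and SE_imp[12] set when EL3
	 * is implemented.
	 */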
1826 static bool trap_debug32(struct kvm_vcpu *vcpu,
1827 struct sys_reg_params *p,
1828 const struct sys_reg_desc *r)
1831 vcpu_cp14(vcpu, r->reg) = p->regval;
1832 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1834 p->regval = vcpu_cp14(vcpu, r->reg);
/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
1851 static bool trap_xvr(struct kvm_vcpu *vcpu,
1852 struct sys_reg_params *p,
1853 const struct sys_reg_desc *rd)
1855 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
1860 val &= 0xffffffffUL;
1861 val |= p->regval << 32;
1864 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
1866 p->regval = *dbg_reg >> 32;
1869 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
1874 #define DBG_BCR_BVR_WCR_WVR(n) \
1876 { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
1878 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
1880 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
1882 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
1884 #define DBGBXVR(n) \
1885 { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
1892 static const struct sys_reg_desc cp14_regs[] = {
1894 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
1896 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
1898 DBG_BCR_BVR_WCR_WVR(0),
1900 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
1901 DBG_BCR_BVR_WCR_WVR(1),
1903 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
1905 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
1906 DBG_BCR_BVR_WCR_WVR(2),
1907 /* DBGDTR[RT]Xint */
1908 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
1909 /* DBGDTR[RT]Xext */
1910 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
1911 DBG_BCR_BVR_WCR_WVR(3),
1912 DBG_BCR_BVR_WCR_WVR(4),
1913 DBG_BCR_BVR_WCR_WVR(5),
1915 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
1917 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
1918 DBG_BCR_BVR_WCR_WVR(6),
1920 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
1921 DBG_BCR_BVR_WCR_WVR(7),
1922 DBG_BCR_BVR_WCR_WVR(8),
1923 DBG_BCR_BVR_WCR_WVR(9),
1924 DBG_BCR_BVR_WCR_WVR(10),
1925 DBG_BCR_BVR_WCR_WVR(11),
1926 DBG_BCR_BVR_WCR_WVR(12),
1927 DBG_BCR_BVR_WCR_WVR(13),
1928 DBG_BCR_BVR_WCR_WVR(14),
1929 DBG_BCR_BVR_WCR_WVR(15),
1931 /* DBGDRAR (32bit) */
1932 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
1936 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
1939 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
1943 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
1946 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
1959 /* DBGDSAR (32bit) */
1960 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
1963 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
1965 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
1967 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
1969 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
1971 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
1973 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
1976 /* Trapped cp14 64bit registers */
1977 static const struct sys_reg_desc cp14_64_regs[] = {
1978 /* DBGDRAR (64bit) */
1979 { Op1( 0), CRm( 1), .access = trap_raz_wi },
1981 /* DBGDSAR (64bit) */
1982 { Op1( 0), CRm( 2), .access = trap_raz_wi },
1985 /* Macro to expand the PMEVCNTRn register */
1986 #define PMU_PMEVCNTR(n) \
1988 { Op1(0), CRn(0b1110), \
1989 CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
1992 /* Macro to expand the PMEVTYPERn register */
1993 #define PMU_PMEVTYPER(n) \
1995 { Op1(0), CRn(0b1110), \
1996 CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
1997 access_pmu_evtyper }
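
/*
 * As an example of the expansion, PMU_PMEVTYPER(12) produces
 * CRm = 0b1101, Op2 = 4, i.e. the AArch32 PMEVTYPER12 encoding
 * (c14, c13, 4).
 */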
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
2005 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
2006 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
2007 { Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr },
2008 { Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr },
2009 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
2010 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
2011 { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
2012 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
2013 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
2014 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
2015 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
2016 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
2017 { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
2018 { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
	/*
	 * DC{C,I,CI}SW operations:
	 */
2023 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
2024 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
2025 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
2028 { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
2029 { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
2030 { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
2031 { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
2032 { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
2033 { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
2034 { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
2035 { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
2036 { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
2037 { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
2038 { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
2039 { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
2040 { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
2041 { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
2042 { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
2044 { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
2045 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
2046 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
2047 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
2050 { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
2052 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
2055 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
2056 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
2123 { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
2125 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
2126 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
2127 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			      bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %p entry %d lacks reset\n",
				table, i);
			return 1;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}
static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_encoding(r);
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_encoding(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
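/*
 * Note that bsearch() is only correct because each table is sorted by
 * the packed Op0/Op1/CRn/CRm/Op2 encoding that reg_to_encoding()
 * produces; check_sysreg_table() enforces that ordering at init time
 * via cmp_sys_reg(), so a misordered table is caught at boot rather
 * than silently failing lookups.
 */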
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_guest(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
/*
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: Pointer to the table of trap descriptors to match against
 * @nr_global: Size of that table
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u32 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;
	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}
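	/*
	 * For illustration: an MCRR with Rt = 0x89abcdef and
	 * Rt2 = 0x01234567 yields regval = 0x0123456789abcdef; Rt
	 * carries the low word and Rt2 the high word of the 64bit
	 * transfer.
	 */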
	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);

	return 1;
}
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: Pointer to the table of trap descriptors to match against
 * @nr_global: Size of that table
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u32 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (esr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((esr & 1) == 0);
	params.CRn = (esr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (esr >> 14) & 0x7;
	params.Op2 = (esr >> 17) & 0x7;
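	/*
	 * The shifts above follow the ESR_EL2 ISS layout for MCR/MRC
	 * traps: Op2 in bits [19:17], Op1 in [16:14], CRn in [13:10],
	 * Rt in [9:5], CRm in [4:1] and the direction bit (0 = write)
	 * in bit [0]. For illustration, a trapped
	 * "mrc p15, 0, r1, c2, c0, 2" (TTBCR) decodes to Op1 = 0,
	 * CRn = 2, CRm = 0, Op2 = 2 with is_write == false.
	 */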
	if (!emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);

	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
}
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	/* See ARM DDI 0487E.a, section D12.3.2 */
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
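/*
 * The mask trick works because the IMPLEMENTATION DEFINED space for
 * Op0 == 3 covers CRn == 11 (0b1011) and CRn == 15 (0b1111): clearing
 * bit 2 with (CRn & 0b1011) maps both onto 0b1011, so a single compare
 * matches exactly those two values and nothing else.
 */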
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}

	return 1;
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
		if (sys_reg_descs[i].reset)
			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);
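	/*
	 * The ISS layout for MRS/MSR traps adds Op0 in bits [21:20] on
	 * top of the MCR/MRC fields decoded above. For illustration, a
	 * trapped "mrs x3, ctr_el0" (Op0 = 3, Op1 = 3, CRn = 0,
	 * CRm = 0, Op2 = 1) decodes to a read (is_write == false) and
	 * lands on the CTR_EL0 entry in sys_reg_descs.
	 */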
	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return ret;
}
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			  | KVM_REG_ARM_COPROC_MASK
			  | KVM_REG_ARM64_SYSREG_OP0_MASK
			  | KVM_REG_ARM64_SYSREG_OP1_MASK
			  | KVM_REG_ARM64_SYSREG_CRN_MASK
			  | KVM_REG_ARM64_SYSREG_CRM_MASK
			  | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
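/*
 * For illustration: a userspace index carrying Op0 = 3, Op1 = 0,
 * CRn = 1, CRm = 0, Op2 = 0 (SCTLR_EL1) decodes into the same
 * sys_reg_params that a trapped guest MSR/MRS to that register would
 * produce, which is what lets the trap tables double as the userspace
 * get/set tables.
 */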
const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
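/*
 * For illustration: CSSELR packs InD in bit [0] and (level - 1) in
 * bits [3:1], so val = 0 selects the L1 data/unified cache and
 * val = 1 the L1 instruction cache. With ctype 3 (separate I and D)
 * both values are valid; a unified L1 (ctype 4) only accepts val = 0.
 */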
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
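/*
 * A demux index is thus KVM_REG_ARM64 | KVM_REG_SIZE_U32 |
 * KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | <CSSELR value>,
 * exactly the shape write_demux_regids() below reports to userspace;
 * the get/set paths simply reverse that packing and bounds-check the
 * CSSELR value.
 */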
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_user(vcpu, r))
		return -ENOENT;

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden_from_user(vcpu, r))
		return -ENOENT;

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}
static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
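/*
 * For illustration (using the uapi shift values): SCTLR_EL1
 * (Op0 = 3, Op1 = 0, CRn = 1, CRm = 0, Op2 = 0) packs to
 * (3 << 14) | (1 << 7) = 0xc080 in the low bits, giving the
 * well-known index 0x603000000013c080 once the ARM64/U64/SYSREG
 * prefix is OR'd in.
 */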
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;

	return true;
}
static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden_from_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;

	return 0;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}

	return total;
}
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
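/*
 * A minimal userspace sketch (assuming vcpu_fd came from
 * KVM_CREATE_VCPU) of how these two entry points are consumed via
 * KVM_GET_REG_LIST:
 *
 *	struct kvm_reg_list hdr = { .n = 0 };
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &hdr);	 // -E2BIG, but sets hdr.n
 *	list = malloc(sizeof(*list) + hdr.n * sizeof(__u64));
 *	list->n = hdr.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);	 // fills list->reg[]
 *
 * kvm_arm_num_sys_reg_descs() sizes the sys_reg portion of that list
 * and kvm_arm_copy_sys_reg_indices() fills it in.
 */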
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false));
	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
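/*
 * For illustration: if CLIDR reports Ctype1 = 3 (separate I/D),
 * Ctype2 = 4 (unified) and Ctype3 = 0, the loop above stops at i = 2
 * and the mask (1 << 6) - 1 keeps only the six bits describing levels
 * 1 and 2, so the to-be-ignored Ctype4..Ctype7 fields can never make
 * is_valid_cache() accept a non-existent cache level.
 */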