// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;

	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

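/*
 * Worked example (illustrative, derived from the helper above): for a
 * descriptor tagged AA32(HI), get_access_mask() yields
 * mask = GENMASK_ULL(63, 32) and shift = 32, so a 32-bit AArch32 value
 * v is deposited into the top half of the 64-bit sysreg as
 * ((u64)v << 32); AA32(LO) selects the bottom half, and the default
 * case covers plain 64-bit registers.
 */
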
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
	if (p->regval & SYS_OSLAR_OSLK)
		oslsr |= SYS_OSLSR_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 const struct kvm_one_reg *reg, void __user *uaddr)
{
	u64 id = sys_reg_to_index(rd);
	u64 val;
	int err;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

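/*
 * Worked example (illustrative): with an AA32(LO) mapping, a 32-bit
 * guest write of 0xdeadbeef through reg_to_dbg() replaces only bits
 * [31:0] of the 64-bit shadow register, and a 32-bit read through
 * dbg_to_reg() returns just those low bits; the top half is left
 * alone, as the comment above requires.
 */
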
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);

	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}

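/*
 * Worked example: the packing above is Aff0 = vcpu_id[3:0],
 * Aff1 = vcpu_id[11:4], Aff2 = vcpu_id[19:12]. A vcpu_id of 18 (0x12)
 * thus resets MPIDR_EL1 to Aff0 = 2, Aff1 = 1, Aff2 = 0, with bit 31
 * (RES1) set by the final write.
 */
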
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;
}

static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	if (!system_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;
	__vcpu_sys_reg(vcpu, r->reg) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

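/*
 * Worked example: an EL0 guest read of PMCCNTR_EL0 goes through
 * pmu_access_cycle_counter_el0_disabled(), which permits the access
 * if PMUSERENR_EL0.CR or PMUSERENR_EL0.EN is set (or the vCPU is in a
 * privileged mode); otherwise an UNDEF is injected and the access
 * handler bails out.
 */
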
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!system_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

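/*
 * Worked example of the decoding above: PMXEVCNTR_EL0 (CRn=9, CRm=13,
 * Op2=2) is indirected through PMSELR_EL0, Op2=0 selects the cycle
 * counter, CRn=0/CRm=9 is the AArch32 PMCCNTR, and for the direct
 * PMEVCNTRn_EL0 array (CRn=14) the index is CRm[1:0]:Op2[2:0], e.g.
 * CRm=0b1001, Op2=0b010 decodes to event counter 10.
 */
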
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
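
/*
 * Worked example: DBG_BCR_BVR_WCR_WVR_EL1(1) expands to four table
 * entries, for DBGBVR1_EL1, DBGBCR1_EL1, DBGWVR1_EL1 and DBGWCR1_EL1,
 * whose encodings carry the breakpoint/watchpoint number in CRm; the
 * trap_/get_/set_ handlers above recover it as rd->CRm.
 */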

#define PMU_SYS_REG(r)						\
	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr,					\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
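
/*
 * Worked example: PTRAUTH_KEY(APIA) expands to descriptors for
 * APIAKEYLO_EL1 and APIAKEYHI_EL1. Per the comment above, their
 * undef_access handler only ever runs for a guest without PtrAuth;
 * for PtrAuth-enabled guests the access is fixed up on exit and never
 * reaches this table.
 */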

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r, bool raz)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (raz)
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
		if (kvm_vgic_global_state.type == VGIC_V3) {
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
		}
		break;
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_AA64DFR0_PMUVER_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
		/* Hide SPE from guests */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
		break;
	case SYS_ID_DFR0_EL1:
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_DFR0_PERFMON_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
		break;
	}

	return val;
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool raz = sysreg_visible_as_raz(vcpu, r);

	return __access_id_reg(vcpu, p, r, raz);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	u8 csv2, csv3;
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/*
	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
	 * it doesn't promise more than what is actually provided (the
	 * guest could otherwise be covered in ectoplasmic residue).
	 */
	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
	if (csv2 > 1 ||
	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* Same thing for CSV3 */
	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT);
	if (csv3 > 1 ||
	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
		return -EINVAL;

	/* We can only differ with CSV[23], and anything else is an error */
	val ^= read_id_reg(vcpu, rd, false);
	val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) |
		 (0xFUL << ID_AA64PFR0_CSV3_SHIFT));
	if (val)
		return -EINVAL;

	vcpu->kvm->arch.pfr0_csv2 = csv2;
	vcpu->kvm->arch.pfr0_csv3 = csv3;

	return 0;
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(vcpu, rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct kvm_vcpu *vcpu,
			const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(vcpu, rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	bool raz = sysreg_visible_as_raz(vcpu, rd);

	return __get_id_reg(vcpu, rd, uaddr, raz);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	bool raz = sysreg_visible_as_raz(vcpu, rd);

	return __set_id_reg(vcpu, rd, uaddr, raz);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(vcpu, rd, uaddr, true);
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       const struct kvm_one_reg *reg, void __user *uaddr)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = 0;

	return reg_to_user(uaddr, &val, id);
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	int err;
	u64 val;

	/* Perform the access even if we are going to ignore the value */
	err = reg_from_user(&val, uaddr, sys_reg_to_index(rd));
	if (err)
		return err;

	return 0;
}

static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sysreg(clidr_el1);
	return true;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	p->regval = get_ccsidr(csselr);

	/*
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time.
	 * To prevent this trapping from causing performance problems, let's
	 * expose the geometry of all data and unified caches (which are
	 * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 */
	if (!(csselr & 1)) // data or unified cache
		p->regval &= ~GENMASK(27, 3);
	return true;
}

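/*
 * Worked example: in the (pre-FEAT_CCIDX) CCSIDR layout, bits [27:13]
 * hold NumSets and bits [12:3] hold Associativity, so clearing
 * GENMASK(27, 3) reports every data/unified level as 1 set and 1 way
 * while leaving LineSize in bits [2:0] intact.
 */
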
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_reg,		\
	.set_user = set_raz_id_reg,		\
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * debug client.
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
	  SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },

	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * (pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};

static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						      \
	/* DBGBVRn */							      \
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
	/* DBGWVRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
	/* DBGWCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							      \
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
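
/*
 * Worked example: PMU_PMEVCNTR(10) expands to the AArch32 encoding
 * Op1=0, CRn=14, CRm=0b1001, Op2=0b010, which access_pmu_evcntr()
 * decodes back to event counter 10 via ((CRm & 3) << 3) | (Op2 & 7).
 */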

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
	{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
	{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
	/* PMMIR */
	{ Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch Timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			      bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %p entry %d lacks reset\n",
				table, i);
			return 1;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}
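
/*
 * find_reg() binary-searches the table, which is why check_sysreg_table()
 * above insists that every table is strictly ordered: a miss stays cheap
 * even for the large sys_reg_descs[] array.
 */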
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}
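
/*
 * The ISS encoding of an MCRR/MRRC trap, as decoded below: Op1 is in
 * ESR bits [19:16], Rt2 in [14:10], Rt in [9:5], CRm in [4:1], and
 * bit [0] is the direction (set for a read, i.e. MRRC).
 */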
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The table of trap descriptors to match against
 * @nr_global: The size of that table
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
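
/*
 * For an MCR/MRC trap the ISS layout differs: Op2 is in ESR bits [19:17],
 * Op1 in [16:14], CRn in [13:10], Rt in [9:5], CRm in [4:1], with bit [0]
 * again giving the direction, as decoded below.
 */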
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The table of trap descriptors to match against
 * @nr_global: The size of that table
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.CRm = (esr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((esr & 1) == 0);
	params.CRn = (esr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (esr >> 14) & 0x7;
	params.Op2 = (esr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
}
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	/* See ARM DDI 0487E.a, section D12.3.2 */
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}
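
/*
 * The mask trick above matches CRn == 11 or CRn == 15 (0b1011 and 0b1111,
 * both of which cover 0b1011), i.e. the two CRn values the architecture
 * reserves for IMPLEMENTATION DEFINED registers when Op0 == 3.
 */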
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}
	return 1;
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
		if (sys_reg_descs[i].reset)
			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
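
/*
 * esr_sys64_to_params() unpacks Op0/Op1/Op2/CRn/CRm and the access
 * direction from the architected MRS/MSR ISS layout (Op0 in ESR bits
 * [21:20], direction in bit [0]); only the register value itself needs
 * to be filled in by hand above.
 */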
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}
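
/*
 * For instance, FUNCTION_INVARIANT(midr_el1) expands to a get_midr_el1()
 * helper that snapshots the host's MIDR_EL1 into the descriptor's ->val
 * field.
 */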
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
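
/*
 * So, with CSSELR's bottom bit selecting instruction (1) versus
 * data/unified (0): val == 0 names the level 1 data/unified cache,
 * val == 1 the level 1 instruction cache, and so on up the hierarchy.
 */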
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
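
/*
 * A demux index is thus KVM_REG_ARM64 | KVM_REG_SIZE_U32 |
 * KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | <CSSELR value>,
 * exactly what write_demux_regids() below hands out to userspace.
 */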
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
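
/*
 * Userspace reaches the two functions above through the
 * KVM_{GET,SET}_ONE_REG vcpu ioctls. A minimal sketch (error handling
 * omitted), using the uapi ARM64_SYS_REG() helper to build the index:
 *
 *	__u64 val;
 *	struct kvm_one_reg one_reg = {
 *		.id   = ARM64_SYS_REG(3, 0, 1, 0, 0),	// SCTLR_EL1
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */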
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}
static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
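
/*
 * For example, TTBR0_EL1 (Op0=3, Op1=0, CRn=2, CRm=0, Op2=0) produces the
 * same index userspace would build with ARM64_SYS_REG(3, 0, 2, 0, 0);
 * index_to_params() above is the exact inverse of this packing.
 */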
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* First give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
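
/*
 * Worked example (host-dependent): separate L1 caches plus a unified L2
 * yield Ctype1 = 0b011 and Ctype2 = 0b100, so cache_levels ends up as
 * 0b100011 and is_valid_cache() accepts CSSELR values 0, 1 and 2.
 */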