/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
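/*
 * These requests are consumed in the vcpu run loop via the generic
 * request API. As an illustrative sketch (not code from this header):
 * a caller that wants a vCPU to take note of a pending interrupt
 * would do something like
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and the run loop then picks the request up with kvm_check_request().
 */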
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				   KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
struct kvm_hyp_memcache {
	phys_addr_t head;
	unsigned long nr_pages;
};

static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
				     phys_addr_t *p,
				     phys_addr_t (*to_pa)(void *virt))
{
	*p = mc->head;
	mc->head = to_pa(p);
	mc->nr_pages++;
}

static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
				     void *(*to_va)(phys_addr_t phys))
{
	phys_addr_t *p = to_va(mc->head);

	if (!mc->nr_pages)
		return NULL;

	mc->head = *p;
	mc->nr_pages--;

	return p;
}

static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
				       unsigned long min_pages,
				       void *(*alloc_fn)(void *arg),
				       phys_addr_t (*to_pa)(void *virt),
				       void *arg)
{
	while (mc->nr_pages < min_pages) {
		phys_addr_t *p = alloc_fn(arg);

		if (!p)
			return -ENOMEM;
		push_hyp_memcache(mc, p, to_pa);
	}

	return 0;
}

static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
				       void (*free_fn)(void *virt, void *arg),
				       void *(*to_va)(phys_addr_t phys),
				       void *arg)
{
	while (mc->nr_pages)
		free_fn(pop_hyp_memcache(mc, to_va), arg);
}

void free_hyp_memcache(struct kvm_hyp_memcache *mc);
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
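/*
 * Example (an illustrative sketch, not part of this header): keeping
 * four pages in a memcache and tearing it down again. The
 * my_alloc_page()/my_free_page()/my_virt_to_phys()/my_phys_to_virt()
 * helpers are hypothetical stand-ins for the caller's allocator and
 * its VA<->PA conversion routines.
 *
 *	static int example_refill(struct kvm_hyp_memcache *mc)
 *	{
 *		return __topup_hyp_memcache(mc, 4, my_alloc_page,
 *					    my_virt_to_phys, NULL);
 *	}
 *
 *	static void example_teardown(struct kvm_hyp_memcache *mc)
 *	{
 *		__free_hyp_memcache(mc, my_free_page, my_phys_to_virt, NULL);
 *	}
 */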
struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};
struct kvm_arch_memory_slot {
};
/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};

typedef unsigned int pkvm_handle_t;

struct kvm_protected_vm {
	pkvm_handle_t handle;
	struct kvm_hyp_memcache teardown_mc;
};
struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64	vtcr;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/*
	 * The following two bits are used to indicate the guest's EL1
	 * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT bit
	 * is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
	 * Otherwise, the guest's EL1 register width has not yet been
	 * determined.
	 */
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED		3
#define KVM_ARCH_FLAG_EL1_32BIT				4
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		5
	unsigned long flags;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	/* Descriptor of the hypercall feature firmware registers */
	struct kvm_smccc_features smccc_feat;

	/*
	 * For an untrusted host VM, 'pkvm.handle' is used to look up
	 * the associated pKVM instance in the hypervisor.
	 */
	struct kvm_protected_vm pkvm;
};
struct kvm_vcpu_fault_info {
	u64 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};
enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};
struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};
struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};
struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/*
	 * Guest floating point state
	 *
	 * The architecture has two main floating point extensions,
	 * the original FPSIMD and SVE. These have overlapping
	 * register views, with the FPSIMD V registers occupying the
	 * low 128 bits of the SVE Z registers. When the core
	 * floating point code saves the register state of a task it
	 * records which view it saved in fp_type.
	 */
	void *sve_state;
	enum fp_type fp_type;
	unsigned int sve_max_vl;
	u64 svcr;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Ownership of the FP regs */
	enum {
		FP_STATE_FREE,
		FP_STATE_HOST_OWNED,
		FP_STATE_GUEST_OWNED,
	} fp_state;

	/* Configuration flags, set once and for all before the vcpu can run */
	u8 cflags;

	/* Input flags to the hypervisor code, potentially cleared after use */
	u8 iflags;

	/* State flags for kernel bookkeeping, unused by the hypervisor code */
	u8 sflags;

	/*
	 * Don't run the guest (internal implementation need).
	 *
	 * Contrary to the flags above, this is set/cleared outside of
	 * a vcpu context, and thus cannot be mixed with the flags
	 * themselves (or the flag accesses need to be made atomic).
	 */
	bool pause;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to debug the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
		bool	pstate_ss;
	} guest_debug_preserved;

	/* vcpu power state */
	struct kvm_mp_state mp_state;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};
/*
 * Each 'flag' is composed of a comma-separated triplet:
 *
 * - the flag-set it belongs to in the vcpu->arch structure
 * - the value for that flag
 * - the mask for that flag
 *
 * __vcpu_single_flag() builds such a triplet for a single-bit flag.
 * unpack_vcpu_flag() extracts the flag value from the triplet for
 * direct use outside of the flag accessors.
 */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define __unpack_flag(_set, _f, _m)	_f
#define unpack_vcpu_flag(...)		__unpack_flag(__VA_ARGS__)
#define __build_check_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *_fset;			\
								\
		/* Check that the flags fit in the mask */	\
		BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m)));	\
		/* Check that the flags fit in the type */	\
		BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m));	\
	} while (0)

#define __vcpu_get_flag(v, flagset, f, m)			\
	({							\
		__build_check_flag(v, flagset, f, m);		\
								\
		v->arch.flagset & (m);				\
	})

#define __vcpu_set_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		if (HWEIGHT(m) > 1)				\
			*fset &= ~(m);				\
		*fset |= (f);					\
	} while (0)

#define __vcpu_clear_flag(v, flagset, f, m)			\
	do {							\
		typeof(v->arch.flagset) *fset;			\
								\
		__build_check_flag(v, flagset, f, m);		\
								\
		fset = &v->arch.flagset;			\
		*fset &= ~(m);					\
	} while (0)

#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
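/*
 * To illustrate the expansion (a sketch, not code from this file):
 * given a flag defined as
 *
 *	#define MY_FLAG	__vcpu_single_flag(sflags, BIT(3))
 *
 * the triplet is "sflags, BIT(3), BIT(3)", so
 *
 *	vcpu_set_flag(vcpu, MY_FLAG);
 *
 * expands to __vcpu_set_flag((vcpu), sflags, BIT(3), BIT(3)), i.e. it
 * ORs BIT(3) into vcpu->arch.sflags after the build-time checks.
 */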
/* SVE exposed to guest */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
/*
 * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
 * be set together with an exception...
 */
#define INCREMENT_PC		__vcpu_single_flag(iflags, BIT(1))
/* Target EL/MODE (not a single flag, but let's abuse the macro) */
#define EXCEPT_MASK		__vcpu_single_flag(iflags, GENMASK(3, 1))

/* Helpers to encode exceptions with minimum fuss */
#define __EXCEPT_MASK_VAL	unpack_vcpu_flag(EXCEPT_MASK)
#define __EXCEPT_SHIFT		__builtin_ctzl(__EXCEPT_MASK_VAL)
#define __vcpu_except_flags(_f)	iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL

/*
 * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
 * values:
 *
 * For AArch32 EL1:
 */
#define EXCEPT_AA32_UND		__vcpu_except_flags(0)
#define EXCEPT_AA32_IABT	__vcpu_except_flags(1)
#define EXCEPT_AA32_DABT	__vcpu_except_flags(2)
/* For AArch64: */
#define EXCEPT_AA64_EL1_SYNC	__vcpu_except_flags(0)
#define EXCEPT_AA64_EL1_IRQ	__vcpu_except_flags(1)
#define EXCEPT_AA64_EL1_FIQ	__vcpu_except_flags(2)
#define EXCEPT_AA64_EL1_SERR	__vcpu_except_flags(3)
/* For AArch64 with NV (one day): */
#define EXCEPT_AA64_EL2_SYNC	__vcpu_except_flags(4)
#define EXCEPT_AA64_EL2_IRQ	__vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ	__vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR	__vcpu_except_flags(7)
/* Guest debug is live */
#define DEBUG_DIRTY		__vcpu_single_flag(iflags, BIT(4))
/* Save SPE context if active */
#define DEBUG_STATE_SAVE_SPE	__vcpu_single_flag(iflags, BIT(5))
/* Save TRBE context if active */
#define DEBUG_STATE_SAVE_TRBE	__vcpu_single_flag(iflags, BIT(6))

/* SVE enabled for host EL0 */
#define HOST_SVE_ENABLED	__vcpu_single_flag(sflags, BIT(0))
/* SME enabled for EL0 */
#define HOST_SME_ENABLED	__vcpu_single_flag(sflags, BIT(1))
/* Physical CPU not in supported_cpus */
#define ON_UNSUPPORTED_CPU	__vcpu_single_flag(sflags, BIT(2))
/* WFIT instruction trapped */
#define IN_WFIT			__vcpu_single_flag(sflags, BIT(3))
/* vcpu system registers loaded on physical CPU */
#define SYSREGS_ON_CPU		__vcpu_single_flag(sflags, BIT(4))
/* Software step state is Active-pending */
#define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
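/*
 * Usage sketch (illustrative, mirroring how callers elsewhere in KVM
 * use these accessors): marking SVE configuration as finalized for a
 * vcpu and testing it later reads as
 *
 *	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
 *	...
 *	if (vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED))
 *		...;
 *
 * with the flag-set (cflags), value and mask all supplied by the
 * triplet behind VCPU_SVE_FINALIZED.
 */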
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
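/*
 * Worked example (a sketch): for a vcpu whose sve_max_vl is 64 bytes
 * (a 512-bit vector length), vcpu_sve_max_vq() yields 64/16 = 4
 * quadwords, and vcpu_sve_state_size() returns SVE_SIG_REGS_SIZE(4):
 * enough for the 32 Z registers, 16 P registers and FFR at that
 * vector length. An invalid sve_max_vl makes the macro WARN and
 * return 0 instead.
 */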
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
#else
#define vcpu_has_ptrauth(vcpu)	false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
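/*
 * As a sketch of the distinction: __vcpu_sys_reg(vcpu, TPIDR_EL1)
 * always reads the in-memory copy in vcpu->arch.ctxt, which may be
 * stale while the vcpu is loaded on a VHE host, whereas
 * vcpu_read_sys_reg(vcpu, TPIDR_EL1) goes through the helpers below
 * and picks up the live hardware value when SYSREGS_ON_CPU is set.
 */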
static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};
void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)
#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
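/*
 * Illustrative call sites (a sketch of how the host code uses these
 * wrappers, not definitions from this header): a hypercall with no
 * return value goes through kvm_call_hyp(), e.g.
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *
 * while one whose result is needed goes through kvm_call_hyp_ret(),
 * e.g.
 *
 *	u64 cfg = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
 *
 * On VHE the function is called directly (followed by an isb());
 * on nVHE it becomes an HVC to the hypervisor via kvm_call_hyp_nvhe().
 */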
void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int kvm_arm_vmid_bits;
int kvm_arm_vmid_alloc_init(void);
void kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}
/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HW_PERF_EVENTS
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */