/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)
/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};
struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;

	/* Memory Tagging Extension enabled for the guest */
	bool mte_enabled;
};
struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};
enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */
	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,
	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */
	NR_SYS_REGS	/* Nothing after this line! */
};
struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};
struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};
struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};
struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to debug the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
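/*
 * Illustrative sketch (demo_alloc_sve_state is a hypothetical helper,
 * condensed from the SVE finalization logic rather than copied from it):
 * vcpu_sve_state_size() sizes the sve_state backing store that
 * vcpu_sve_pffr() later points into, and yields 0 if sve_max_vl was
 * never set up.
 */
#if 0	/* example only, never compiled */
static int demo_alloc_sve_state(struct kvm_vcpu *vcpu)
{
	size_t size = vcpu_sve_state_size(vcpu);

	if (!size)
		return -EINVAL;

	vcpu->arch.sve_state = kzalloc(size, GFP_KERNEL);
	if (!vcpu->arch.sve_state)
		return -ENOMEM;

	return 0;
}
#endif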
/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif
#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
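/*
 * Illustrative sketch (demo_copy_actlr is a hypothetical helper, not a
 * kernel function): __vcpu_sys_reg() always touches the memory-backed
 * copy, which is what userspace accessors want, while the
 * vcpu_read_sys_reg()/vcpu_write_sys_reg() helpers also handle the case
 * where the register is currently live on the physical CPU.
 */
#if 0	/* example only, never compiled */
static void demo_copy_actlr(struct kvm_vcpu *vcpu)
{
	/* Userspace-style access: memory-backed copy only. */
	u64 stored = __vcpu_sys_reg(vcpu, ACTLR_EL1);

	/* Emulation-style access: may hit the hardware register. */
	vcpu_write_sys_reg(vcpu, stored, ACTLR_EL1);
}
#endif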
static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};
void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)
#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})
/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)
#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
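/*
 * Illustrative sketch of the two host-side call styles above: on VHE the
 * function is invoked directly (plus an isb()), while on nVHE it becomes
 * an HVC to the EL2 stub named by KVM_HOST_SMCCC_FUNC().
 * __kvm_flush_vm_context and __kvm_get_mdcr_el2 are existing hyp entry
 * points used this way; demo_hyp_calls itself is hypothetical.
 */
#if 0	/* example only, never compiled */
static u64 demo_hyp_calls(void)
{
	kvm_call_hyp(__kvm_flush_vm_context);		/* no return value */

	return kvm_call_hyp_ret(__kvm_get_mdcr_el2);	/* returns a value */
}
#endif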
void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}
static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}
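/*
 * Illustrative sketch (demo_record_steal is hypothetical): the intended
 * pairing of the pvtime helpers above. The base GPA starts out as
 * GPA_INVALID until userspace configures the attribute, so
 * kvm_arm_is_pvtime_enabled() doubles as an "is configured" check.
 */
#if 0	/* example only, never compiled */
static void demo_record_steal(struct kvm_vcpu *vcpu)
{
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_update_stolen_time(vcpu);
}
#endif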
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}
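/*
 * Illustrative sketch (demo_init_this_cpu_ctxt is hypothetical): reaching
 * this CPU's slot of the per-CPU host data declared above and seeding its
 * context. The this_cpu_ptr_hyp_sym() accessor comes from asm/kvm_asm.h.
 */
#if 0	/* example only, never compiled */
static void demo_init_this_cpu_ctxt(void)
{
	struct kvm_cpu_context *ctxt =
		&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt;

	kvm_init_host_cpu_context(ctxt);
}
#endif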
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}
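/*
 * Illustrative sketch (demo_event_is_deferred is hypothetical): on nVHE
 * hosts an exclude_host event cannot be stopped by the kernel while the
 * guest runs, so it is "deferred" and switched at guest entry/exit
 * instead.
 */
#if 0	/* example only, never compiled */
static bool demo_event_is_deferred(struct perf_event *event)
{
	return kvm_pmu_counter_deferred(&event->attr);
}
#endif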
/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)	(system_supports_mte() && (kvm)->arch.mte_enabled)
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */