/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

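/*
 * Illustrative sketch, not kernel code: these requests ride on the generic
 * vcpu request machinery. A typical producer/consumer pairing looks like:
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *	...
 *	if (kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu))
 *		service the pending interrupt before reentering the guest;
 */
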
#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm *kvm;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;
};

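/*
 * Illustrative sketch, not kernel code: userspace opts in to the NISV
 * behaviour above by enabling the capability on the VM file descriptor:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_NISV_TO_USER,
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * Once enabled, a data abort without valid syndrome exits to userspace as
 * KVM_EXIT_ARM_NISV instead of being treated as an unhandled fault.
 */
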
struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
#define cp14_DBGVCR	(DBGVCR32_EL2 * 2)

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)

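/*
 * Worked example, for illustration only: each 64-bit sysreg slot is viewed
 * as a pair of 32-bit coprocessor registers, hence NR_COPRO_REGS being
 * twice NR_SYS_REGS. The AArch32 TTBR0 therefore lives at index
 * c2_TTBR0 == TTBR0_EL1 * 2, with its top half at c2_TTBR0_high, both
 * aliasing the single 64-bit TTBR0_EL1 entry in sys_regs[].
 */
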
struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug register
	 * values we want to use when debugging the guest. This is set via
	 * the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
			      sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})

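/*
 * Illustrative sketch, not kernel code: vcpu_sve_state_size() sizes the
 * per-vcpu SVE register storage, roughly how a finalization path might
 * allocate vcpu->arch.sve_state:
 *
 *	size_t sz = vcpu_sve_state_size(vcpu);
 *
 *	if (!sz)
 *		return -EINVAL;
 *	vcpu->arch.sve_state = kzalloc(sz, GFP_KERNEL);
 *	if (!vcpu->arch.sve_state)
 *		return -ENOMEM;
 *
 * sve_ffr_offset()/vcpu_sve_pffr() then index into that same buffer.
 */
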
/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

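/*
 * Illustrative sketch, not kernel code: emulation of a running vcpu must
 * use the accessors above, since on VHE a register may live in hardware
 * rather than in memory while sysregs_loaded_on_cpu is set:
 *
 *	u64 in_memory = __vcpu_sys_reg(vcpu, SCTLR_EL1);    // backing store
 *	u64 effective = vcpu_read_sys_reg(vcpu, SCTLR_EL1); // CPU copy if loaded
 *
 * The two may differ until kvm_vcpu_put_sysregs_vhe() syncs the hardware
 * state back into the backing store.
 */
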
/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define CPx_BIAS	IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)

#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])

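/*
 * Worked example, for illustration only: copro[] aliases sys_regs[], so a
 * 64-bit sysreg occupies copro indices 2n and 2n + 1. On a little-endian
 * host copro[2n] is the low 32 bits; on big-endian the halves are swapped.
 * XORing the index with CPx_BIAS (1 on BE, 0 on LE) hides this, so
 * vcpu_cp15(vcpu, c0_MPIDR) reads the low word of MPIDR_EL1 either way.
 */
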
struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})

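/*
 * Illustrative sketch, not kernel code: callers name the hyp entry point
 * and the macros pick the right path, a direct call plus isb() on VHE or
 * an HVC into the nVHE hyp code otherwise, e.g.:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 */
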
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented. The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	return false;
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

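/*
 * Illustrative sketch, not kernel code: a host perf event created with
 * attr.exclude_host set should only count while the guest runs. Without
 * VHE the kernel cannot toggle such a counter around EL2 entry itself,
 * so its programming is deferred:
 *
 *	if (kvm_pmu_counter_deferred(&attr))
 *		// record the counter in the events_host/events_guest
 *		// masks (struct kvm_pmu_events) and apply it at the
 *		// world switch instead of (de)activating it immediately
 */
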
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

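/*
 * Illustrative sketch, not kernel code: userspace fixes the SVE
 * configuration before the vcpu can run, after which the flag above is
 * set and the SVE register geometry is frozen:
 *
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */
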
#endif /* __ARM64_KVM_HOST_H__ */