/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
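
/*
 * Illustrative example (not part of the original header): a vcpu run exit
 * code carries the exception class in its low bits, with bit 31 flagging a
 * pending SError. A caller can decode it roughly as:
 *
 *	if (ARM_SERROR_PENDING(exit_code))
 *		exit_code = ARM_EXCEPTION_CODE(exit_code);	// strip the flag
 *	if (ARM_EXCEPTION_IS_TRAP(exit_code))
 *		... handle a trapped guest exception ...
 */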
#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
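
/*
 * Sketch (field name assumed, shown for illustration only): the table above
 * is in the symbol-list format expected by __print_symbolic() in a trace
 * event's TP_printk():
 *
 *	TP_printk("exit=%s",
 *		  __print_symbolic(__entry->ret, kvm_arm_exception_type))
 */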
/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
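
/*
 * Sketch (illustrative, modelled on the host-call wrappers in kvm_host.h):
 * the host invokes an nVHE hyp function by passing one of the IDs below as
 * an SMCCC fast call over HVC:
 *
 *	struct arm_smccc_res res;
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run), vcpu, &res);
 */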
#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14

#ifndef __ASSEMBLY__

#include <linux/mm.h>

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)
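
/*
 * Example usage (the declaration mirrors the one KVM makes elsewhere, and is
 * shown here for illustration only): a single line emits both the
 * kernel-proper and the kvm_nvhe_sym()-prefixed per-CPU declarations:
 *
 *	DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
 */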
/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_arm_hyp_percpu_base[cpu];				\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
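
/*
 * Illustrative use (assuming the kvm_host_data declaration shown earlier):
 *
 *	struct kvm_host_data *hd = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
 *	if (!hd)
 *		return -ENOMEM;		// hyp percpu pages not allocated yet
 */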
#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet.
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif
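
/*
 * Illustrative expansion of the default (kernel-proper) case above:
 *
 *	CHOOSE_HYP_SYM(__kvm_hyp_vector)
 *	    -> is_kernel_in_hyp_mode() ? __kvm_hyp_vector
 *				       : kvm_nvhe_sym(__kvm_hyp_vector)
 */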
/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
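
/*
 * Illustrative use (not from this file): when the kernel runs at EL1, HYP
 * setup code references symbols through their linear-map alias rather than
 * their kernel VA:
 *
 *	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
 */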
struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_host_vector	CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

#if defined(GCC_VERSION) && GCC_VERSION < 50000
#define SYM_CONSTRAINT	"i"
#else
#define SYM_CONSTRAINT	"S"
#endif

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : SYM_CONSTRAINT (&s));		\
		addr;							\
	})
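
/*
 * Illustrative use from hyp code (symbol chosen for illustration only):
 * obtain an EL2-valid address via a PC-relative computation instead of an
 * absolute kernel VA:
 *
 *	unsigned long v = (unsigned long)hyp_symbol_addr(__kvm_hyp_vector);
 */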
#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * Execute an AT (address translation) instruction at EL2; returns -EFAULT
 * if the AT itself faults. SPSR_EL2/ELR_EL2 are saved up front and restored
 * on the fixup path, as the unexpected exception clobbers them.
 */
#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
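
/*
 * Illustrative use (modelled on the hyp fault-handling path): translate a
 * faulting address with a stage-1 EL1 read lookup, tolerating a fault on
 * the AT instruction itself:
 *
 *	if (!__kvm_at("s1e1r", far))
 *		par = read_sysreg(par_el1);
 */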
#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
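
/*
 * Illustrative use from an EL2 trap path (labels assumed, not from this
 * file):
 *
 *	el1_trap:
 *		get_vcpu_ptr	x1, x0		// x1 = vcpu, x0 clobbered
 *		mov	x0, #ARM_EXCEPTION_TRAP
 *		b	__guest_exit
 */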
/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section so
 * that it can be mapped to EL2. The KVM version is not sorted. The caller
 * must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
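
/*
 * Illustrative use (local labels chosen for illustration): record a fixup
 * for an instruction that may take an unexpected exception:
 *
 *	1:	ldr	x0, [x2]		// may fault
 *		...
 *	2:	...				// fixup target
 *		_kvm_extable	1b, 2b
 */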
#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm
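
/*
 * Illustrative pairing (modelled on the guest entry/exit path): the hyp
 * context is saved before running the guest and restored on the way out:
 *
 *	save_callee_saved_regs x1
 *	save_sp_el0	x1, x2
 *	// ... enter the guest ...
 *	restore_sp_el0	x1, x2
 *	restore_callee_saved_regs x1
 */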
#endif /* __ASSEMBLY__ */

#endif /* __ARM_KVM_ASM_H__ */