/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
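
/*
 * Illustrative sketch, not part of the original header: an exit code
 * returned by the world-switch carries one of the ARM_EXCEPTION_* values
 * in its low bits, with bit 31 flagging a pending SError. A hypothetical
 * handler can decompose it as:
 *
 *	if (ARM_SERROR_PENDING(exit_code))
 *		handle_serror();		// hypothetical helper
 *	switch (ARM_EXCEPTION_CODE(exit_code)) {
 *	case ARM_EXCEPTION_IRQ:
 *		...
 *	}
 */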

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
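
/*
 * Illustrative only: the table above is in the {value, name} form expected
 * by __print_symbolic(), so a trace event can pretty-print exit codes, e.g.:
 *
 *	__print_symbolic(__entry->ret, kvm_arm_exception_type)
 */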

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#ifndef __ASSEMBLY__

#include <linux/mm.h>

/*
 * Translate name of a symbol defined in nVHE hyp to the name seen
 * by kernel proper. All nVHE symbols are prefixed by the build system
 * to avoid clashes with the VHE variants.
 */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name, one defined in the VHE
 * and the other in the nVHE hyp implementation.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
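
/*
 * Illustrative expansion for a hypothetical symbol "foo":
 *
 *	DECLARE_KVM_HYP_SYM(foo);	// extern char foo[];
 *					// extern char __kvm_nvhe_foo[];
 *	CHOOSE_VHE_SYM(foo)		// -> foo
 *	CHOOSE_NVHE_SYM(foo)		// -> __kvm_nvhe_foo
 */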

#ifndef __KVM_NVHE_HYPERVISOR__
/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet.
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
					   : CHOOSE_NVHE_SYM(sym))
#else
/* The nVHE hypervisor shouldn't even try to access anything */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
#endif	/* __KVM_NVHE_HYPERVISOR__ */
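
/*
 * Illustrative only: with the declarations further down, callers can stay
 * agnostic of the hyp flavour and resolve the right copy at run time:
 *
 *	void *vect = CHOOSE_HYP_SYM(__kvm_hyp_vector);
 *	// VHE:  &__kvm_hyp_vector[0]
 *	// nVHE: &__kvm_nvhe___kvm_hyp_vector[0]
 */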

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
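
/*
 * Illustrative only: hyp can only reach the linear map, so a call site
 * that maps a kernel section into hyp would look along these lines:
 *
 *	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
 *				  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
 */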

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
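
/*
 * Illustrative only: the __kvm_* entry points above are not called
 * directly; kernel code reaches them through the kvm_call_hyp() and
 * kvm_call_hyp_ret() wrappers from asm/kvm_host.h, e.g.:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */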

/*
 * Obtain the PC-relative address of a kernel symbol.
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})

/*
 * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
 * provided that sym is really a *symbol* and not a pointer obtained from
 * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
 * sparse quiet.
 */
#define __hyp_this_cpu_ptr(sym)						\
	({								\
		void *__ptr;						\
		__verify_pcpu_ptr(&sym);				\
		__ptr = hyp_symbol_addr(sym);				\
		__ptr += read_sysreg(tpidr_el2);			\
		(typeof(sym) __kernel __force *)__ptr;			\
	 })

#define __hyp_this_cpu_read(sym)					\
	({								\
		*__hyp_this_cpu_ptr(sym);				\
	 })
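
/*
 * Illustrative only: at hyp, TPIDR_EL2 holds this CPU's per-CPU offset,
 * so a per-CPU *symbol* can be reached as its base address + TPIDR_EL2.
 * Hypothetical per-CPU variable:
 *
 *	DECLARE_PER_CPU(unsigned long, hyp_foo);
 *	unsigned long v = __hyp_this_cpu_read(hyp_foo);
 */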

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

#define __kvm_at(at_op, addr)						\
	({								\
		int __kvm_at_err = 0;					\
		u64 spsr, elr;						\
		asm volatile(						\
		"	mrs	%1, spsr_el2\n"				\
		"	mrs	%2, elr_el2\n"				\
		"1:	at	"at_op", %3\n"				\
		"	isb\n"						\
		"	b	9f\n"					\
		"2:	msr	spsr_el2, %1\n"				\
		"	msr	elr_el2, %2\n"				\
		"	mov	%w0, %4\n"				\
		"9:\n"							\
		__KVM_EXTABLE(1b, 2b)					\
		: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)	\
		: "r" (addr), "i" (-EFAULT));				\
		__kvm_at_err;						\
	})

#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	add	\reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	ldr	\reg, [\reg, \tmp]
.endm

.macro get_host_ctxt reg, tmp
	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
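
/*
 * Illustrative only (hypothetical hyp assembly), mirroring the hyp entry
 * code: fetch the running vcpu, using one register as scratch:
 *
 *	get_vcpu_ptr	x1, x0		// x1 = vcpu, x0 clobbered
 *	add	x29, x1, #VCPU_CONTEXT	// then reach its guest context
 */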

/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section so
 * that it can be mapped to EL2. The KVM version is not sorted. The caller
 * must ensure that:
 * - x18 has the hypervisor value, to allow any Shadow-Call-Stack
 *   instrumented code to write to it, and
 * - SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long	(\from - .), (\to - .)
	.popsection
.endm
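
/*
 * Illustrative only, with hypothetical labels: pair a potentially
 * faulting hyp instruction with its fixup, as the C-side __KVM_EXTABLE()
 * string does:
 *
 *	1:	at	s1e1r, x0
 *		...
 *	2:	// fixup: undo state and report the failure
 *	_kvm_extable	1b, 2b
 */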

#endif	/* __ASSEMBLY__ */

#endif /* __ARM_KVM_ASM_H__ */