KVM: arm64: Remove hyp_symbol_addr
arch/arm64/include/asm/kvm_asm.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ         0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP        2
#define ARM_EXCEPTION_IL          3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE    HVC_STUB_ERR

#define kvm_arm_exception_type                                  \
        {ARM_EXCEPTION_IRQ,             "IRQ"           },      \
        {ARM_EXCEPTION_EL1_SERROR,      "SERROR"        },      \
        {ARM_EXCEPTION_TRAP,            "TRAP"          },      \
        {ARM_EXCEPTION_HYP_GONE,        "HYP_GONE"      }

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE     (2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)                                           \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
                           ARM_SMCCC_SMC_64,                            \
                           ARM_SMCCC_OWNER_VENDOR_HYP,                  \
                           (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init                    0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run                    1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context            2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa          3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid              4
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid        5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff           6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs                 7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2         8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr               9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr              10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs                11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2                12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs               13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs            14
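
/*
 * Illustrative only (not part of the original header): each ID above is
 * wrapped into an SMCCC fast call by the host. For example,
 *
 *	KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run)
 *
 * expands to
 *
 *	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
 *			   ARM_SMCCC_OWNER_VENDOR_HYP, 1)
 *
 * which the host passes in x0 of an HVC. kvm_call_hyp_nvhe() (in
 * kvm_host.h) is the usual wrapper; that name is an assumption about
 * the surrounding tree, not something this header defines.
 */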

#ifndef __ASSEMBLY__

#include <linux/mm.h>

#define DECLARE_KVM_VHE_SYM(sym)        extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)       extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)                \
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)
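
/*
 * For instance (a sketch; kvm_nvhe_sym() comes from asm/hyp_image.h and
 * prefixes its argument with __kvm_nvhe_):
 *
 *	DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 *
 * declares both __kvm_hyp_vector[] (VHE) and
 * __kvm_nvhe___kvm_hyp_vector[] (nVHE).
 */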

#define DECLARE_KVM_VHE_PER_CPU(type, sym)      \
        DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)     \
        DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)      \
        DECLARE_KVM_VHE_PER_CPU(type, sym);     \
        DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)      per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)                                          \
        ({                                                                      \
                unsigned long base, off;                                        \
                base = kvm_arm_hyp_percpu_base[cpu];                            \
                off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
                      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
                base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
        })
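
/*
 * Typical use from kernel context (illustrative; kvm_host_data is a
 * per-CPU hyp variable declared elsewhere in the KVM headers, named
 * here as an assumption):
 *
 *	struct kvm_host_data *hd = this_cpu_ptr_nvhe_sym(kvm_host_data);
 *
 * A NULL result means the hyp percpu pages have not been allocated yet.
 */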

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)    sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)             __nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)            __vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)             (is_kernel_in_hyp_mode()        \
                                           ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)       (is_kernel_in_hyp_mode()        \
                                           ? this_cpu_ptr(&sym)         \
                                           : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)   (is_kernel_in_hyp_mode()        \
                                           ? per_cpu_ptr(&sym, cpu)     \
                                           : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_NVHE_SYM(sym)    kvm_nvhe_sym(sym)

#endif

struct kvm_nvhe_init_params {
        unsigned long mair_el2;
        unsigned long tcr_el2;
        unsigned long tpidr_el2;
        unsigned long stack_hyp_va;
        phys_addr_t pgd_pa;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)                                               \
        ({                                                              \
                void *val = (ptr);                                      \
                if (!is_kernel_in_hyp_mode())                           \
                        val = lm_alias((ptr));                          \
                val;                                                    \
         })
#define kvm_ksym_ref_nvhe(sym)  kvm_ksym_ref(kvm_nvhe_sym(sym))
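
/*
 * Illustrative use (a sketch, not from this header): without VHE, hyp
 * symbols must be referenced through the linear map rather than the
 * kernel image alias, so callers resolve them first, e.g.
 *
 *	unsigned long vec = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 */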

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init          CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector        CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs    CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
                                     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)                                         \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"


#define __kvm_at(at_op, addr)                                           \
( {                                                                     \
        int __kvm_at_err = 0;                                           \
        u64 spsr, elr;                                                  \
        asm volatile(                                                   \
        "       mrs     %1, spsr_el2\n"                                 \
        "       mrs     %2, elr_el2\n"                                  \
        "1:     at      "at_op", %3\n"                                  \
        "       isb\n"                                                  \
        "       b       9f\n"                                           \
        "2:     msr     spsr_el2, %1\n"                                 \
        "       msr     elr_el2, %2\n"                                  \
        "       mov     %w0, %4\n"                                      \
        "9:\n"                                                          \
        __KVM_EXTABLE(1b, 2b)                                           \
        : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
        : "r" (addr), "i" (-EFAULT));                                   \
        __kvm_at_err;                                                   \
} )
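
/*
 * Example (a sketch of a typical caller in the hyp fault path; details
 * assumed, not part of this header): translate a faulting VA and fall
 * back gracefully if the AT instruction itself takes an exception.
 *
 *	if (!__kvm_at("s1e1r", far))
 *		par = read_sysreg(par_el1);
 */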


#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
        adr_this_cpu \reg, kvm_host_data, \tmp
        add     \reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
        get_host_ctxt \ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
        str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Same format as _asm_extable, but output to a different section so that
 * it can be mapped to EL2. The KVM version is not sorted. The caller must
 * ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro  _kvm_extable, from, to
        .pushsection    __kvm_ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
.endm
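
/*
 * Illustrative use (a sketch, labels assumed): tag a potentially faulting
 * instruction so an unexpected exception at 1: lands on the fixup at 2:.
 *
 *	1:	ldr	x0, [x2]
 *		...
 *	2:	// fixup: undo state, report the failure
 *	_kvm_extable	1b, 2b
 */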

#define CPU_XREG_OFFSET(x)      (CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET           CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET       (CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
        str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
        // We require \ctxt is not x18-x28
        ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
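
/*
 * Typical pairing (illustrative; \ctxt would point at a struct
 * kvm_cpu_context, register choice assumed):
 *
 *	save_callee_saved_regs x1	// entering the guest: stash host regs
 *	...
 *	restore_callee_saved_regs x1	// leaving the guest: bring them back
 */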

.macro save_sp_el0 ctxt, tmp
        mrs     \tmp,   sp_el0
        str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
        ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
        msr     sp_el0, \tmp
.endm

#endif /* __ASSEMBLY__ */

#endif /* __ARM_KVM_ASM_H__ */