arm64: Work around broken GCC 4.9 handling of "S" constraint
[linux-2.6-microblaze.git] / arch/arm64/include/asm/kvm_asm.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ         0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP        2
#define ARM_EXCEPTION_IL          3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE    HVC_STUB_ERR

#define kvm_arm_exception_type                                  \
        {ARM_EXCEPTION_IRQ,             "IRQ"           },      \
        {ARM_EXCEPTION_EL1_SERROR,      "SERROR"        },      \
        {ARM_EXCEPTION_TRAP,            "TRAP"          },      \
        {ARM_EXCEPTION_HYP_GONE,        "HYP_GONE"      }

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE     (2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#define KVM_HOST_SMCCC_ID(id)                                           \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
                           ARM_SMCCC_SMC_64,                            \
                           ARM_SMCCC_OWNER_VENDOR_HYP,                  \
                           (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init                    0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run                    1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context            2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa          3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid              4
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid        5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff           6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs                 7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2         8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr               9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr              10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs                11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2                12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs               13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs            14

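/*
 * Illustrative sketch (not defined in this header): the host invokes one of
 * the functions above by passing its SMCCC function ID in an HVC, roughly:
 *
 *      struct arm_smccc_res res;
 *
 *      arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);
 *
 * See kvm_call_hyp_nvhe() in <asm/kvm_host.h> for the actual wrapper.
 */
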
#ifndef __ASSEMBLY__

#include <linux/mm.h>

#define DECLARE_KVM_VHE_SYM(sym)        extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)       extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)                \
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)      \
        DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)     \
        DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)      \
        DECLARE_KVM_VHE_PER_CPU(type, sym);     \
        DECLARE_KVM_NVHE_PER_CPU(type, sym)

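/*
 * Illustrative sketch: a per-CPU object shared between the VHE and nVHE
 * flavours is declared once with the macro above, e.g.
 *
 *      DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
 *
 * (the real declaration lives in <asm/kvm_host.h>).
 */
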
/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)      per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)                                          \
        ({                                                                      \
                unsigned long base, off;                                        \
                base = kvm_arm_hyp_percpu_base[cpu];                            \
                off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
                      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
                base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
        })
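
/*
 * Illustrative sketch: resolving the current CPU's copy of an nVHE per-CPU
 * object from the host (kvm_host_data is declared elsewhere; the error
 * handling below is only an example):
 *
 *      struct kvm_host_data *hd = this_cpu_ptr_nvhe_sym(kvm_host_data);
 *
 *      if (!hd)
 *              return -ENOENT;         // per-CPU region not allocated yet
 */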

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)    sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)             __nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)            __vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)             (is_kernel_in_hyp_mode()        \
                                           ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)       (is_kernel_in_hyp_mode()        \
                                           ? this_cpu_ptr(&sym)         \
                                           : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)   (is_kernel_in_hyp_mode()        \
                                           ? per_cpu_ptr(&sym, cpu)     \
                                           : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_NVHE_SYM(sym)    kvm_nvhe_sym(sym)

#endif

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)                                               \
        ({                                                              \
                void *val = (ptr);                                      \
                if (!is_kernel_in_hyp_mode())                           \
                        val = lm_alias((ptr));                          \
                val;                                                    \
         })
#define kvm_ksym_ref_nvhe(sym)  kvm_ksym_ref(kvm_nvhe_sym(sym))
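
/*
 * Illustrative sketch: on a non-VHE system, hyp text/data is mapped at EL2
 * from its linear alias, so callers typically write something like
 *
 *      void *vect = kvm_ksym_ref(__kvm_hyp_vector);
 *
 * before handing the address to the EL2 mapping code.
 */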

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init          CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_host_vector   CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
#define __kvm_hyp_vector        CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs    CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
                                     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

/*
 * GCC versions prior to 5.0 mishandle the "S" (symbol reference) asm
 * constraint, so fall back to the "i" constraint for them.
 */
#if defined(GCC_VERSION) && GCC_VERSION < 50000
#define SYM_CONSTRAINT  "i"
#else
#define SYM_CONSTRAINT  "S"
#endif

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)                                              \
        ({                                                              \
                typeof(s) *addr;                                        \
                asm("adrp       %0, %1\n"                               \
                    "add        %0, %0, :lo12:%1\n"                     \
                    : "=r" (addr) : SYM_CONSTRAINT (&s));               \
                addr;                                                   \
        })
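
/*
 * Illustrative sketch: from code running at EL2, prefer
 *
 *      ptr = hyp_symbol_addr(some_sym);
 *
 * over a plain &some_sym, which the compiler is free to materialise as a
 * literal-pool load of the kernel VA (some_sym is a stand-in name, not a
 * symbol defined here).
 */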

#define __KVM_EXTABLE(from, to)                                         \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"


#define __kvm_at(at_op, addr)                                           \
( {                                                                     \
        int __kvm_at_err = 0;                                           \
        u64 spsr, elr;                                                  \
        asm volatile(                                                   \
        "       mrs     %1, spsr_el2\n"                                 \
        "       mrs     %2, elr_el2\n"                                  \
        "1:     at      "at_op", %3\n"                                  \
        "       isb\n"                                                  \
        "       b       9f\n"                                           \
        "2:     msr     spsr_el2, %1\n"                                 \
        "       msr     elr_el2, %2\n"                                  \
        "       mov     %w0, %4\n"                                      \
        "9:\n"                                                          \
        __KVM_EXTABLE(1b, 2b)                                           \
        : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
        : "r" (addr), "i" (-EFAULT));                                   \
        __kvm_at_err;                                                   \
} )
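
/*
 * Illustrative sketch: a stage-1 translation from hyp that tolerates an
 * unexpected exception on the AT instruction ("far" and "par" are stand-in
 * variables):
 *
 *      if (__kvm_at("s1e1r", far))
 *              return false;           // AT took an exception, got -EFAULT
 *      par = read_sysreg_par();
 */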


#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
        adr_this_cpu \reg, kvm_host_data, \tmp
        add     \reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
        get_host_ctxt \ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
        str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section so
 * that it can be mapped to EL2. The KVM version is not sorted. The caller
 * must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro  _kvm_extable, from, to
        .pushsection    __kvm_ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
.endm
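
/*
 * Illustrative sketch: pair a potentially-faulting instruction with its
 * fixup label (labels and registers below are examples only):
 *
 *      1:      ldr     x0, [x2]
 *              ...
 *      _kvm_extable    1b, 3f
 *      ...
 *      3:      // fixup path
 */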

#define CPU_XREG_OFFSET(x)      (CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET           CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET       (CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
        str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
        // We require \ctxt is not x18-x28
        ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
        mrs     \tmp,   sp_el0
        str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
        ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
        msr     sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */