// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

enum vdso_abi {
	VDSO_ABI_AA64,
#ifdef CONFIG_COMPAT_VDSO
	VDSO_ABI_AA32,
#endif /* CONFIG_COMPAT_VDSO */
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

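/*
 * Common mremap() callback for both vDSO ABIs: refuse to let the mapping be
 * resized and record the new base address in the mm context.
 */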
static int __vdso_remap(enum vdso_abi abi,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_info[abi].vdso_code_end -
				  vdso_info[abi].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

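/*
 * One-time initialisation for a vDSO image: check the ELF magic, then build
 * a pagelist with the shared data page at index 0 followed by the vDSO code
 * pages, and hook it up to the ABI's data/code special mappings.
 */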
static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages + 1,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_info[abi].dm->pages = &vdso_pagelist[0];
	vdso_info[abi].cm->pages = &vdso_pagelist[1];

	return 0;
}

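/*
 * Map the vDSO into a new process: the data page is installed read-only,
 * immediately followed by the vDSO text (tagged with BTI where supported).
 */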
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
#ifdef CONFIG_COMPAT_VDSO
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
#else
	AA32_MAP_SIGPAGE
#endif
};

static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
#endif

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
#ifdef CONFIG_COMPAT_VDSO
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

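/*
 * Allocate the AArch32 vectors page and copy the kuser helpers into its last
 * bytes, so they end up at their expected offsets within the [vectors]
 * mapping.
 */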
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}

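/*
 * Allocate the AArch32 compat pages at boot: with CONFIG_COMPAT_VDSO this
 * sets up the vvar/vdso special mappings via __vdso_init(), otherwise a
 * standalone [sigpage] holding the sigreturn trampolines is used. The kuser
 * helpers page is allocated in either case.
 */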
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	ret = __vdso_init(VDSO_ABI_AA32);
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);

	return ret;
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);

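/*
 * Install the kuser helpers page at the fixed AArch32 [vectors] address.
 */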
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
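/*
 * Without a compat vDSO, the sigreturn trampolines live in a separate
 * [sigpage] mapping placed wherever get_unmapped_area() finds room.
 */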
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

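/*
 * Called at exec time for AArch32 tasks: map the kuser helpers page plus
 * either the compat vDSO or the standalone sigreturn page.
 */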
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(VDSO_ABI_AA32,
				       mm,
				       bprm,
				       uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

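/* mremap() callback for the AArch64 [vdso] code mapping. */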
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

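/*
 * Hook the AArch64 vvar/vdso special mappings into vdso_info and build the
 * vDSO pagelist at boot.
 */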
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

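/*
 * Standard binfmt hook: map the AArch64 vDSO (data page plus text) into the
 * address space of a newly exec'd task.
 */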
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64,
				       mm,
				       bprm,
				       uses_interp);

	mmap_write_unlock(mm);

	return ret;
}