// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];

enum vdso_abi {
        VDSO_ABI_AA64,
        VDSO_ABI_AA32,
};

enum vvar_pages {
        VVAR_DATA_PAGE_OFFSET,
        VVAR_TIMENS_PAGE_OFFSET,
        VVAR_NR_PAGES,
};

struct vdso_abi_info {
        const char *name;
        const char *vdso_code_start;
        const char *vdso_code_end;
        unsigned long vdso_pages;
        /* Data Mapping */
        struct vm_special_mapping *dm;
        /* Code Mapping */
        struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
        [VDSO_ABI_AA64] = {
                .name = "vdso",
                .vdso_code_start = vdso_start,
                .vdso_code_end = vdso_end,
        },
#ifdef CONFIG_COMPAT_VDSO
        [VDSO_ABI_AA32] = {
                .name = "vdso32",
                .vdso_code_start = vdso32_start,
                .vdso_code_end = vdso32_end,
        },
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
        struct vdso_data        data[CS_BASES];
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

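/*
 * mremap() callback shared by both ABIs: the vDSO code mapping may be
 * moved but never resized, and context.vdso must track the new location
 * so that e.g. the signal return trampoline can still be found.
 */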
static int __vdso_remap(enum vdso_abi abi,
                        const struct vm_special_mapping *sm,
                        struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        unsigned long vdso_size = vdso_info[abi].vdso_code_end -
                                  vdso_info[abi].vdso_code_start;

        if (vdso_size != new_size)
                return -EINVAL;

        current->mm->context.vdso = (void *)new_vma->vm_start;

        return 0;
}

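/*
 * Check that the built-in vDSO image starts with the ELF magic, then
 * build the array of struct pages covering its code so that the image
 * can be mapped into user space later.
 */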
static int __vdso_init(enum vdso_abi abi)
{
        int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
                return -EINVAL;
        }

        vdso_info[abi].vdso_pages = (
                        vdso_info[abi].vdso_code_end -
                        vdso_info[abi].vdso_code_start) >>
                        PAGE_SHIFT;

        vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
                                sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

        for (i = 0; i < vdso_info[abi].vdso_pages; i++)
                vdso_pagelist[i] = pfn_to_page(pfn + i);

        vdso_info[abi].cm->pages = vdso_pagelist;

        return 0;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;

        mmap_read_lock(mm);

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long size = vma->vm_end - vma->vm_start;

                if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
                        zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
                if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
                        zap_page_range(vma, vma->vm_start, size);
#endif
        }

        mmap_read_unlock(mm);
        return 0;
}

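/*
 * Return the vvar page carrying the clock data for the faulting task's
 * time namespace, or NULL for a task in the root namespace.
 */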
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        if (likely(vma->vm_mm == current->mm))
                return current->nsproxy->time_ns->vvar_page;

        /*
         * VM_PFNMAP | VM_IO protect the .fault() handler from being called
         * through interfaces like /proc/$pid/mem or
         * process_vm_{readv,writev}() as long as there's no .access()
         * in special_mapping_vmops.
         * For more details see check_vma_flags() and __access_remote_vm().
         */
        WARN(1, "vvar_page accessed remotely");

        return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        return NULL;
}
#endif

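/*
 * Fault handler for the vvar mapping: resolve the faulting page offset
 * to the right physical page. For a task inside a time namespace, the
 * namespace page and the real vDSO data page trade places (see the
 * comment in the switch below).
 */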
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *timens_page = find_timens_vvar_page(vma);
        unsigned long pfn;

        switch (vmf->pgoff) {
        case VVAR_DATA_PAGE_OFFSET:
                if (timens_page)
                        pfn = page_to_pfn(timens_page);
                else
                        pfn = sym_to_pfn(vdso_data);
                break;
#ifdef CONFIG_TIME_NS
        case VVAR_TIMENS_PAGE_OFFSET:
                /*
                 * If a task belongs to a time namespace then its
                 * namespace-specific VVAR page is mapped at
                 * VVAR_DATA_PAGE_OFFSET and the real VVAR data page is
                 * mapped at VVAR_TIMENS_PAGE_OFFSET.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (!timens_page)
                        return VM_FAULT_SIGBUS;
                pfn = sym_to_pfn(vdso_data);
                break;
#endif /* CONFIG_TIME_NS */
        default:
                return VM_FAULT_SIGBUS;
        }

        return vmf_insert_pfn(vma, vmf->address, pfn);
}

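/*
 * The vvar mapping may be moved but never resized: the vDSO reaches
 * both of its data pages at fixed offsets within the mapping.
 */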
static int vvar_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

        if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
                return -EINVAL;

        return 0;
}

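/*
 * Map the vvar pages followed immediately by the vDSO code for the
 * given ABI, and record the vDSO base in the mm. The layout matters:
 * the vDSO finds its data at a fixed offset below its own code.
 */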
static int __setup_additional_pages(enum vdso_abi abi,
                                    struct mm_struct *mm,
                                    struct linux_binprm *bprm,
                                    int uses_interp)
{
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        unsigned long gp_flags = 0;
        void *ret;

        BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

        vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }

        ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
                                       VM_READ|VM_MAYREAD|VM_PFNMAP,
                                       vdso_info[abi].dm);
        if (IS_ERR(ret))
                goto up_fail;

        if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
                gp_flags = VM_ARM64_BTI;

        vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|gp_flags|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       vdso_info[abi].cm);
        if (IS_ERR(ret))
                goto up_fail;

        return 0;

up_fail:
        mm->context.vdso = NULL;
        return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}

enum aarch32_map {
        AA32_MAP_VECTORS, /* kuser helpers */
        AA32_MAP_SIGPAGE,
        AA32_MAP_VVAR,
        AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static struct vm_special_mapping aarch32_vdso_maps[] = {
        [AA32_MAP_VECTORS] = {
                .name   = "[vectors]", /* ABI */
                .pages  = &aarch32_vectors_page,
        },
        [AA32_MAP_SIGPAGE] = {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_sig_page,
        },
        [AA32_MAP_VVAR] = {
                .name = "[vvar]",
                .fault = vvar_fault,
                .mremap = vvar_mremap,
        },
        [AA32_MAP_VDSO] = {
                .name = "[vdso]",
                .mremap = aarch32_vdso_mremap,
        },
};

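/*
 * Allocate the AArch32 vectors page and copy the kuser helpers to its
 * end, so that they land at the ABI-defined addresses at the top of
 * the page once it is mapped at AARCH32_VECTORS_BASE.
 */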
static int aarch32_alloc_kuser_vdso_page(void)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
        unsigned long vdso_page;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        vdso_page = get_zeroed_page(GFP_ATOMIC);
        if (!vdso_page)
                return -ENOMEM;

        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
        aarch32_vectors_page = virt_to_page(vdso_page);
        flush_dcache_page(aarch32_vectors_page);
        return 0;
}

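/*
 * Allocate the AArch32 signal page and copy the sigreturn trampolines
 * into it; compat signal delivery points return addresses here.
 */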
static int aarch32_alloc_sigpage(void)
{
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long sigpage;

        sigpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigpage)
                return -ENOMEM;

        memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_sig_page = virt_to_page(sigpage);
        flush_dcache_page(aarch32_sig_page);
        return 0;
}

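/*
 * Hook up the compat vvar and vDSO special mappings and initialise the
 * compat vDSO image, if one was built.
 */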
static int __aarch32_alloc_vdso_pages(void)
{
        if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
                return 0;

        vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
        vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

        return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
        int ret;

        ret = __aarch32_alloc_vdso_pages();
        if (ret)
                return ret;

        ret = aarch32_alloc_sigpage();
        if (ret)
                return ret;

        return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
        void *ret;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        /*
         * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
         * not safe to CoW the page containing the CPU exception vectors.
         */
        ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

        return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
        unsigned long addr;
        void *ret;

        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = ERR_PTR(addr);
                goto out;
        }

        /*
         * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
         * set breakpoints.
         */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC | VM_MAYREAD |
                                       VM_MAYWRITE | VM_MAYEXEC,
                                       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
        if (IS_ERR(ret))
                goto out;

        mm->context.sigpage = (void *)addr;

out:
        return PTR_ERR_OR_ZERO(ret);
}

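/*
 * Called at execve() time for compat tasks: map the kuser helpers, the
 * compat vDSO (if enabled) and the signal page into the new mm.
 */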
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = aarch32_kuser_helpers_setup(mm);
        if (ret)
                goto out;

        if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
                ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
                                               uses_interp);
                if (ret)
                        goto out;
        }

        ret = aarch32_sigreturn_setup(mm);
out:
        mmap_write_unlock(mm);
        return ret;
}
#endif /* CONFIG_COMPAT */

static int vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}

enum aarch64_map {
        AA64_MAP_VVAR,
        AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
        [AA64_MAP_VVAR] = {
                .name   = "[vvar]",
                .fault = vvar_fault,
                .mremap = vvar_mremap,
        },
        [AA64_MAP_VDSO] = {
                .name   = "[vdso]",
                .mremap = vdso_mremap,
        },
};

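/*
 * Hook up the AArch64 vvar and vDSO special mappings and initialise the
 * vDSO image at boot.
 */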
static int __init vdso_init(void)
{
        vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
        vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

        return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

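/*
 * Called at execve() time: map the vvar pages and the AArch64 vDSO into
 * the new process's address space.
 */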
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
        mmap_write_unlock(mm);

        return ret;
}