// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

enum vdso_abi {
	VDSO_ABI_AA64,
#ifdef CONFIG_COMPAT_VDSO
	VDSO_ABI_AA32,
#endif /* CONFIG_COMPAT_VDSO */
};
struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};
static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};
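/*
 * The vDSO data page, exported read-only to userspace through the data
 * special mapping installed by __setup_additional_pages() below.
 */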
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
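/*
 * The .mremap() hook of the vDSO code mapping: reject any attempt to
 * resize the VMA and record the new base address in the mm context.
 */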
static int __vdso_remap(enum vdso_abi abi,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_info[abi].vdso_code_end -
				  vdso_info[abi].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}
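/*
 * Validate the vDSO image and build the page array backing the special
 * mappings: slot 0 is the vDSO data page, the remaining slots are the
 * vDSO code pages.
 */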
static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
		vdso_info[abi].vdso_code_end -
		vdso_info[abi].vdso_code_start) >>
		PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages + 1,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_info[abi].dm->pages = &vdso_pagelist[0];
	vdso_info[abi].cm->pages = &vdso_pagelist[1];

	return 0;
}
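/*
 * Map the vDSO into a new mm: the data page first, then the vDSO code
 * pages, marking the code as a BTI guarded region when the system
 * supports BTI.
 */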
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;
up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}
#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
#ifdef CONFIG_COMPAT_VDSO
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
#else
	AA32_MAP_SIGPAGE
#endif
};

static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
#endif

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
#ifdef CONFIG_COMPAT_VDSO
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
#endif /* CONFIG_COMPAT_VDSO */
};
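/*
 * Copy the kuser helpers into the top of a zeroed page; the resulting
 * "[vectors]" page is later mapped at AARCH32_VECTORS_BASE by
 * aarch32_kuser_helpers_setup().
 */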
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}
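/*
 * With CONFIG_COMPAT_VDSO, the sigreturn trampolines live in the compat
 * vDSO itself, so only the vvar/vdso mappings plus the kuser page need
 * setting up. Without it, a standalone "[sigpage]" carrying the AArch32
 * sigreturn code is allocated instead.
 */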
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	ret = __vdso_init(VDSO_ABI_AA32);
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);
	return ret;
}
#endif /* CONFIG_COMPAT_VDSO */
static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);
	return PTR_ERR_OR_ZERO(ret);
}
#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;
out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */
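/*
 * Set up the compat mappings at exec time: the kuser helpers page, plus
 * either the compat vDSO or the standalone sigreturn page depending on
 * CONFIG_COMPAT_VDSO.
 */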
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

#ifdef CONFIG_COMPAT_VDSO
	ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm, uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}
enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap	= vdso_mremap,
	},
};
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);
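/*
 * Entry point used by the ELF loader at exec time to map the native
 * (AArch64) vDSO into the new process image.
 */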
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}