// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];
enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};
struct vdso_abi_info {
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};
static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};
/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
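/*
 * Called on mremap() of the vDSO text mapping: reject any attempt to
 * resize it and record the new base address in the mm context.
 */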
static int __vdso_remap(enum vdso_abi abi,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_info[abi].vdso_code_end -
				  vdso_info[abi].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}
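/*
 * Check the vDSO image and build the array of struct pages backing its
 * text, so the pages can later be mapped into each process.
 */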
static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
		vdso_info[abi].vdso_code_end -
		vdso_info[abi].vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}
/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}
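/*
 * The vvar mapping must keep its original size across mremap().
 */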
static int vvar_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
		return -EINVAL;

	return 0;
}
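/*
 * Map the vvar pages followed by the vDSO text into @mm. The data
 * mapping is read-only; the code mapping is executable and, when BTI
 * is available, guarded.
 */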
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}
#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}
enum aarch32_map {
	AA32_MAP_VECTORS,	/* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};
static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;
static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
		.mremap = vvar_mremap,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
};
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);

	return 0;
}
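/*
 * Allocate and populate the page holding the AArch32 sigreturn
 * trampolines.
 */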
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);

	return 0;
}
static int __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}
static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}
enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
		.mremap = vvar_mremap,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);
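/*
 * Map the AArch64 vDSO into the current process's address space.
 */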
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}