// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <linux/binfmts.h>
#include <vdso/datapage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>
#include <asm/timex.h>

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}
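
/*
 * Note that the pages are inserted lazily: _install_special_mapping()
 * below creates an empty special mapping, and vdso_fault() only runs on
 * the first access to each page, handing back the backing page for the
 * faulting offset.
 */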

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;

	return 0;
}
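
/*
 * Tracking the new location here keeps context.vdso_base valid when
 * userland moves the mapping with mremap(), as checkpoint/restore
 * tools do.
 */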

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		vdso_enabled = enabled;
	return 1;
}
__setup("vdso=", vdso_setup);
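
/*
 * Usage: booting with "vdso=0" (or "vdso=off", "vdso=n") disables the
 * mapping; "vdso=1" restores the default. Anything kstrtobool() does
 * not understand leaves vdso_enabled untouched.
 */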

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = (struct vdso_data *)&vdso_data_store.data;

void vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
}
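
/*
 * Storing the CPU number in the TOD-clock programmable field lets the
 * vDSO getcpu code read it back in userland (via STORE CLOCK EXTENDED)
 * without a system call.
 */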

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;

	if (is_compat_task())
		return 0;

	vdso_pages = vdso64_pages;
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on those pages, but it is then your responsibility never to do
	 * that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It's fine to use that for setting breakpoints in
	 * the vDSO code pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	mmap_write_unlock(mm);
	return rc;
}
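
/*
 * The address saved in context.vdso_base is handed to userland via the
 * AT_SYSINFO_EHDR entry of the ELF auxiliary vector (see ARCH_DLINFO in
 * asm/elf.h); glibc picks it up from there at process startup.
 */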

static int __init vdso_init(void)
{
	int i;

	vdso_getcpu_init();
	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
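
	/*
	 * Worked example: with 4 KiB pages and a (hypothetical) 0x1800
	 * byte vDSO image, the first term rounds up to 2 code pages and
	 * the trailing "+ 1" reserves a slot for the data page, giving
	 * vdso64_pages = 3.
	 */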

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);

		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
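
/*
 * Example (not part of the kernel build): userland can locate the
 * mapping installed above through the auxiliary vector. A minimal
 * sketch, assuming a glibc that provides getauxval():
 *
 *	#include <stdio.h>
 *	#include <elf.h>
 *	#include <sys/auxv.h>
 *
 *	int main(void)
 *	{
 *		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 *
 *		printf("vDSO ELF header mapped at %#lx\n", vdso);
 *		return 0;
 *	}
 */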