// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset)	\
	const size_t name ## _offset = offset;
#include <asm/vvar.h>
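
/*
 * Note (illustrative): with EMIT_VVAR() redefined as above, re-including
 * <asm/vvar.h> expands each vvar declaration into a "<name>_offset"
 * constant, e.g. _vdso_data_offset, which arch_get_vdso_data() below uses
 * to locate the generic vdso_data inside the vvar page.
 */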

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static void vdso_fix_landing(const struct vdso_image *image,
		struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	if (in_ia32_syscall() && image == &vdso_image_32) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long vdso_land = image->sym_int80_landing_pad;
		unsigned long old_land_addr = vdso_land +
			(unsigned long)current->mm->context.vdso;
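
		/*
		 * Why this matters: the 32-bit fast syscall path points the
		 * saved user IP at the int80 landing pad inside the vdso.
		 * If the vdso is mremap()ed while the task is inside a
		 * syscall, that IP still refers to the old mapping and has
		 * to be redirected to the new one.
		 */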
		/* Fixing userspace landing - look at do_fast_syscall_32 */
		if (regs->ip == old_land_addr)
			regs->ip = new_vma->vm_start + vdso_land;
	}
#endif
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	const struct vdso_image *image = current->mm->context.vdso_image;

	if (image->size != new_size)
		return -EINVAL;

	vdso_fix_landing(image, new_vma);
	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vvar_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	/* sym_vvar_start is negative, so -sym_vvar_start is the vvar size */
	if (new_size != -image->sym_vvar_start)
		return -EINVAL;

	return 0;
}

#ifdef CONFIG_TIME_NS
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops().
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	unsigned long pfn;
	long sym_offset;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;
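
	/*
	 * Illustration: vmf->pgoff is relative to the start of the vvar
	 * area, while the sym_*_page symbols are negative offsets from the
	 * vdso text.  Adding the (negative) sym_vvar_start converts pgoff
	 * into that same "offset from the text" form, so the comparisons
	 * below can match the page symbols directly.
	 */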

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

		/*
		 * If a task belongs to a time namespace then a namespace-
		 * specific VVAR is mapped with the sym_vvar_page offset and
		 * the real VVAR page is mapped with the sym_timens_page
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (timens_page) {
			unsigned long addr;
			vm_fault_t err;

			/*
			 * Optimization: inside a time namespace, pre-fault
			 * the real VVAR page too.  The timens page holds
			 * only per-clock offsets against the VVAR page, so
			 * the latter will be faulted in shortly by the vDSO
			 * code anyway.
			 */
			addr = vmf->address + (image->sym_timens_page - sym_offset);
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;

			pfn = page_to_pfn(timens_page);
		}

		return vmf_insert_pfn(vma, vmf->address, pfn);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_get_pvti_cpu0_va();
		/* The pvti page is shared with the hypervisor, so map it
		 * decrypted when memory encryption is active. */
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			return vmf_insert_pfn_prot(vma, vmf->address,
					__pa(pvti) >> PAGE_SHIFT,
					pgprot_decrypted(vma->vm_page_prot));
		}
	} else if (sym_offset == image->sym_hvclock_page) {
		struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

		if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
			return vmf_insert_pfn(vma, vmf->address,
					virt_to_phys(tsc_pg) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_timens_page) {
		struct page *timens_page = find_timens_vvar_page(vma);

		if (!timens_page)
			return VM_FAULT_SIGBUS;

		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
		return vmf_insert_pfn(vma, vmf->address, pfn);
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
	.mremap = vvar_mremap,
};
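
/*
 * Note: the .name fields above are what appear as "[vdso]" and "[vvar]"
 * in /proc/<pid>/maps, and vma_is_special_mapping() matches VMAs against
 * these descriptors in map_vdso_once() below.
 */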

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start;
	int ret = 0;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
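
	/*
	 * Layout sketch (illustrative): sym_vvar_start is negative, so the
	 * area reserved above covers [addr, addr + size - sym_vvar_start).
	 * The vvar pages sit at the low end starting at @addr and the vdso
	 * text follows at text_start = addr - sym_vvar_start; e.g. with a
	 * 4-page vvar area, text_start == addr + 16384 on 4K-page kernels.
	 */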
	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm, text_start, image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm, addr, -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP, &vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		do_munmap(mm, text_start, image->size, NULL);
	} else {
		current->mm->context.vdso = (void __user *)text_start;
		current->mm->context.vdso_image = image;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
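
	/*
	 * Illustrative numbers: with 4K pages and 2M PMDs, end - start is
	 * always smaller than PMD_SIZE here, so get_random_int() picks from
	 * at most 512 candidate page offsets, which is why the comment
	 * above notes that this doesn't give many random bits.
	 */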

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);

	return map_vdso(image, addr);
}
#endif

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	/*
	 * Check if we have already mapped vdso blob - fail to prevent
	 * abuse from userspace of install_special_mapping(), which may
	 * not do accounting and rlimit checks right.
	 * We could search the VMA near context.vdso, but it's a slowpath,
	 * so let's explicitly check all VMAs to be completely sure.
	 */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma_is_special_mapping(vma, &vdso_mapping) ||
				vma_is_special_mapping(vma, &vvar_mapping)) {
			up_write(&mm->mmap_sem);
			return -EEXIST;
		}
	}
	up_write(&mm->mmap_sem);

	return map_vdso(image, addr);
}
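
/*
 * Note: map_vdso_once() backs the ARCH_MAP_VDSO_* arch_prctl() commands,
 * which let a CRIU-style restorer map a vdso image at a chosen address
 * exactly once per mm.
 */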

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;
		return map_vdso_randomized(&vdso_image_x32);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
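
/*
 * Note: this implements the "vdso=" boot parameter; "vdso=0" disables the
 * 64-bit vdso mapping for new processes, any other value leaves it enabled.
 */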

static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */