// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset) \
        const size_t name ## _offset = offset;
#include <asm/vvar.h>

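/*
 * Re-including <asm/vvar.h> with EMIT_VVAR() redefined above emits one
 * <name>_offset constant per vvar symbol; _vdso_data_offset is then used
 * to locate the generic vdso_data within a vvar page.
 */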
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
        BUG_ON(image->size % PAGE_SIZE != 0);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

struct linux_binprm;

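/*
 * Fault handler for the [vdso] mapping: hand back the page of the vDSO
 * image blob that corresponds to the faulting offset.
 */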
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;

        if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
                return VM_FAULT_SIGBUS;

        vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
        get_page(vmf->page);
        return 0;
}

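/*
 * If a 32-bit task is stopped in the vDSO's int80 landing pad while the
 * vDSO is being moved, rewrite its saved IP so it points into the new
 * mapping.
 */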
static void vdso_fix_landing(const struct vdso_image *image,
                struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        if (in_ia32_syscall() && image == &vdso_image_32) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long vdso_land = image->sym_int80_landing_pad;
                unsigned long old_land_addr = vdso_land +
                        (unsigned long)current->mm->context.vdso;

                /* Fix up the userspace landing address; see do_fast_syscall_32() */
                if (regs->ip == old_land_addr)
                        regs->ip = new_vma->vm_start + vdso_land;
        }
#endif
}

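/*
 * mremap() callback for the [vdso] mapping: keep mm->context.vdso (and a
 * stopped 32-bit task's return IP) in sync with the new location.
 */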
static int vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        const struct vdso_image *image = current->mm->context.vdso_image;

        if (image->size != new_size)
                return -EINVAL;

        vdso_fix_landing(image, new_vma);
        current->mm->context.vdso = (void __user *)new_vma->vm_start;

        return 0;
}

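/*
 * mremap() callback for the [vvar] mapping: only moves that preserve the
 * original size are allowed.
 */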
static int vvar_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

        if (new_size != -image->sym_vvar_start)
                return -EINVAL;

        return 0;
}

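/*
 * Find the time namespace vvar page for the task faulting in this VMA,
 * if there is one; remote accesses are not expected here (see the
 * comment inside).
 */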
#ifdef CONFIG_TIME_NS
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        if (likely(vma->vm_mm == current->mm))
                return current->nsproxy->time_ns->vvar_page;

        /*
         * VM_PFNMAP | VM_IO protect the .fault() handler from being called
         * through interfaces like /proc/$pid/mem or
         * process_vm_{readv,writev}() as long as there's no .access()
         * in special_mapping_vmops().
         * For more details, see check_vma_flags() and __access_remote_vm().
         */

        WARN(1, "vvar_page accessed remotely");

        return NULL;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        return NULL;
}
#endif

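/*
 * Fault handler for the [vvar] mapping.  The offset of the faulting page
 * relative to the vDSO text (sym_vvar_start is negative) selects which
 * special page to install: the vvar data page, the pvclock page, the
 * Hyper-V TSC page or the time namespace page.
 */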
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;
        unsigned long pfn;
        long sym_offset;

        if (!image)
                return VM_FAULT_SIGBUS;

        sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
                image->sym_vvar_start;

        /*
         * Sanity check: a symbol offset of zero means that the page
         * does not exist for this vdso image, not that the page is at
         * offset zero relative to the text mapping.  This should be
         * impossible here, because sym_offset should only be zero for
         * the page past the end of the vvar mapping.
         */
        if (sym_offset == 0)
                return VM_FAULT_SIGBUS;

        if (sym_offset == image->sym_vvar_page) {
                struct page *timens_page = find_timens_vvar_page(vma);

                pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

                /*
                 * If a task belongs to a time namespace then a namespace
                 * specific VVAR is mapped with the sym_vvar_page offset and
                 * the real VVAR page is mapped with the sym_timens_page
                 * offset.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (timens_page) {
                        unsigned long addr;
                        vm_fault_t err;

                        /*
                         * Optimization: inside a time namespace, pre-fault
                         * the real VVAR page too.  The timens page holds
                         * only clock offsets relative to VVAR, so the VVAR
                         * page will be faulted in by vDSO code shortly
                         * anyway.
                         */
                        addr = vmf->address + (image->sym_timens_page - sym_offset);
                        err = vmf_insert_pfn(vma, addr, pfn);
                        if (unlikely(err & VM_FAULT_ERROR))
                                return err;

                        pfn = page_to_pfn(timens_page);
                }

                return vmf_insert_pfn(vma, vmf->address, pfn);
        } else if (sym_offset == image->sym_pvclock_page) {
                struct pvclock_vsyscall_time_info *pvti =
                        pvclock_get_pvti_cpu0_va();
                if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
                        return vmf_insert_pfn_prot(vma, vmf->address,
                                        __pa(pvti) >> PAGE_SHIFT,
                                        pgprot_decrypted(vma->vm_page_prot));
                }
        } else if (sym_offset == image->sym_hvclock_page) {
                struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

                if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
                        return vmf_insert_pfn(vma, vmf->address,
                                        virt_to_phys(tsc_pg) >> PAGE_SHIFT);
        } else if (sym_offset == image->sym_timens_page) {
                struct page *timens_page = find_timens_vvar_page(vma);

                if (!timens_page)
                        return VM_FAULT_SIGBUS;

                pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
                return vmf_insert_pfn(vma, vmf->address, pfn);
        }

        return VM_FAULT_SIGBUS;
}

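/*
 * Descriptors for the two special mappings installed per process: the
 * vDSO text ([vdso]) and the data area in front of it ([vvar]).
 */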
static const struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .fault = vdso_fault,
        .mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
        .fault = vvar_fault,
        .mremap = vvar_mremap,
};

/*
 * Add vdso and vvar mappings to the current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at a free address)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long text_start;
        int ret = 0;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        addr = get_unmapped_area(NULL, addr,
                                 image->size - image->sym_vvar_start, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        text_start = addr - image->sym_vvar_start;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       text_start,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr,
                                       -image->sym_vvar_start,
                                       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
                                       VM_PFNMAP,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                do_munmap(mm, text_start, image->size, NULL);
        } else {
                current->mm->context.vdso = (void __user *)text_start;
                current->mm->context.vdso_image = image;
        }

up_fail:
        up_write(&mm->mmap_sem);
        return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;

        /*
         * Round up the start address.  It can start out unaligned as a result
         * of stack start randomization.
         */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        /*
         * Forcibly align the final address in case we have a hardware
         * issue that requires alignment for performance reasons.
         */
        addr = align_vdso_addr(addr);

        return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
        unsigned long addr = vdso_addr(current->mm->start_stack,
                                       image->size - image->sym_vvar_start);

        return map_vdso(image, addr);
}
#endif

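/*
 * Map a vDSO image exactly once per process (reached e.g. via the
 * ARCH_MAP_VDSO_* prctl()s); fails with -EEXIST if a vDSO or vvar
 * mapping is already present.
 */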
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        /*
         * Check if we have already mapped the vdso blob - fail to prevent
         * abuse from userspace via install_special_mapping, which may
         * not do accounting and rlimit right.
         * We could search the vma near context.vdso, but it's a slowpath,
         * so let's explicitly check all VMAs to be completely sure.
         */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma_is_special_mapping(vma, &vdso_mapping) ||
                                vma_is_special_mapping(vma, &vvar_mapping)) {
                        up_write(&mm->mmap_sem);
                        return -EEXIST;
                }
        }
        up_write(&mm->mmap_sem);

        return map_vdso(image, addr);
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
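/* Map the 32-bit vDSO, unless it has been disabled (vdso32_enabled != 1). */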
static int load_vdso32(void)
{
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
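/*
 * Called from the ELF loader at exec time to map the vDSO (and vvar)
 * pages into the new process.
 */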
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;
                return map_vdso_randomized(&vdso_image_x32);
        }
#endif
#ifdef CONFIG_IA32_EMULATION
        return load_vdso32();
#else
        return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
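/* Parse the "vdso=" kernel command-line parameter; vdso=0 disables the 64-bit vDSO. */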
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);

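/* Patch alternatives into the 64-bit (and x32) vDSO images at boot. */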
static int __init init_vdso(void)
{
        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */