s390/vdso: misc simple code changes
arch/s390/kernel/vdso.c
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
static unsigned int vdso_pages;
static struct page **vdso_pagelist;

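/* One page of vdso_data, mapped read-only into each process as the vDSO data page */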
static union {
        struct vdso_data        data[CS_BASES];
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *str)
{
        bool enabled;

        if (!kstrtobool(str, &enabled))
                vdso_enabled = enabled;
        return 1;
}
__setup("vdso=", vdso_setup);

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        if (vmf->pgoff >= vdso_pages)
                return VM_FAULT_SIGBUS;

        vmf->page = vdso_pagelist[vmf->pgoff];
        get_page(vmf->page);
        return 0;
}

static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *vma)
{
        current->mm->context.vdso_base = vma->vm_start;
        return 0;
}

static const struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .fault = vdso_fault,
        .mremap = vdso_mremap,
};

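/*
 * Store the CPU number in the TOD programmable field so that it is
 * visible to user space, where the vDSO getcpu() code can read it
 * without a system call.
 */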
int vdso_getcpu_init(void)
{
        set_tod_programmable_field(smp_processor_id());
        return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long vdso_base;
        int rc;

        if (!vdso_enabled || is_compat_task())
                return 0;
        if (mmap_write_lock_killable(mm))
                return -EINTR;
        vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
        rc = vdso_base;
        if (IS_ERR_VALUE(vdso_base))
                goto out;
        /*
         * our vma flags don't have VM_WRITE so by default, the process
         * isn't allowed to write those pages.
         * gdb can break that with ptrace interface, and thus trigger COW
         * on those pages but it's then your responsibility to never do that
         * on the "data" page of the vDSO or you'll stop getting kernel
         * updates and your nice userland gettimeofday will be totally dead.
         * It's fine to use that for setting breakpoints in the vDSO code
         * pages though.
         */
        vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_mapping);
        rc = PTR_ERR(vma);
        if (IS_ERR(vma))
                goto out;
        current->mm->context.vdso_base = vdso_base;
        rc = 0;
out:
        mmap_write_unlock(mm);
        return rc;
}

static int __init vdso_init(void)
{
        int i;

        vdso_pages = ((vdso64_end - vdso64_start) >> PAGE_SHIFT) + 1;
        /*
         * Build a NULL terminated page list: the vDSO code pages followed
         * by the shared vdso_data page (already counted in vdso_pages).
         */
        vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
                                GFP_KERNEL);
        if (!vdso_pagelist) {
                vdso_enabled = 0;
                return -ENOMEM;
        }
        for (i = 0; i < vdso_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso64_start + i * PAGE_SIZE);

                get_page(pg);
                vdso_pagelist[i] = pg;
        }
        vdso_pagelist[vdso_pages - 1] = virt_to_page(vdso_data);
        vdso_pagelist[vdso_pages] = NULL;
        get_page(virt_to_page(vdso_data));
        return 0;
}
arch_initcall(vdso_init);
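
For reference, the mapping installed by arch_setup_additional_pages() is advertised to new programs through the AT_SYSINFO_EHDR auxiliary vector entry. A minimal user-space sketch (not part of this file, and assuming a glibc environment that provides getauxval()) to check whether the vDSO was mapped, for example after booting with vdso=0:

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
        /* Address of the vDSO ELF header; 0 if no vDSO mapping exists */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (vdso)
                printf("[vdso] mapped at 0x%lx\n", vdso);
        else
                printf("no vDSO mapping (e.g. booted with vdso=0)\n");
        return 0;
}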