arch/s390/kernel/vdso.c
// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

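/*
 * Start/end markers of the 64-bit and 31-bit compat vDSO images that are
 * built into the kernel image (see the vdso*_wrapper.S files in this
 * directory).
 */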
extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

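/*
 * The vdso data page: one struct vdso_data per clock source base, padded
 * to a full page so it can be mapped into user space as part of the
 * [vvar] mapping.
 */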
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

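/*
 * Page offsets within the [vvar] mapping; must stay in sync with the
 * generic vDSO layout (__VVAR_PAGES), which is checked via BUILD_BUG_ON()
 * in arch_setup_additional_pages().
 */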
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#ifdef CONFIG_TIME_NS
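/*
 * The generic time namespace code uses this hook to find the vdso_data
 * within a vvar page; on s390 the data starts at offset 0 of the page.
 */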
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;
	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops().
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");
	return NULL;
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * a non-root time namespace. Whenever a task changes its namespace, the
 * VVAR page tables are cleared and then re-faulted with the corresponding
 * layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_page_range(vma, vma->vm_start, size);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

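/*
 * Fault handler for the [vvar] mapping: install the PFN of the requested
 * vvar page. For a task inside a time namespace the namespace-specific
 * page is mapped at the data offset and the real vdso data page at the
 * timens offset (see the comments in the switch below).
 */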
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in the VVAR page too, since it will be
			 * accessed to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

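/*
 * Keep context.vdso_base up to date when user space (e.g. CRIU) moves the
 * vDSO text mapping with mremap().
 */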
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

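/* Special mappings for the vvar data pages and the vDSO text. */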
static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

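/*
 * Store the CPU number in the TOD clock programmable field so that the
 * vDSO getcpu() implementation can read it without entering the kernel.
 */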
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

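/*
 * Map the vvar pages and the vDSO text into the address space of a newly
 * exec'ed task: VVAR_NR_PAGES of vvar data first, immediately followed by
 * the vDSO text (64-bit or 31-bit compat, depending on the task).
 */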
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long vdso_text_len, vdso_mapping_len;
	unsigned long vvar_start, vdso_text_start;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
	vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		/* Undo the whole vvar mapping if the text mapping failed */
		do_munmap(mm, vvar_start, VVAR_NR_PAGES * PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

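/*
 * Build a NULL-terminated array of struct page pointers covering a vDSO
 * image, for use as the .pages member of its special mapping.
 */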
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

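/* Set up the page lists for the built-in vDSO images at boot time. */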
static int __init vdso_init(void)
{
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);