// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
                               unsigned offset,
                               size_t len,
                               struct iov_iter *iter,
                               int vm_write)
{
        /* Do the copy for each page */
        while (len && iov_iter_count(iter)) {
                struct page *page = *pages++;
                size_t copy = PAGE_SIZE - offset;
                size_t copied;

                if (copy > len)
                        copy = len;

                if (vm_write)
                        copied = copy_page_from_iter(page, offset, copy, iter);
                else
                        copied = copy_page_to_iter(page, offset, copy, iter);

                len -= copied;
                if (copied < copy && iov_iter_count(iter))
                        return -EFAULT;
                offset = 0;
        }
        return 0;
}
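/*
 * Example pass through the copy loop above, assuming 4 KiB pages: a
 * 5000-byte copy starting at offset 3000 of the first pinned page takes
 * 4096 - 3000 = 1096 bytes from that page and the remaining 3904 bytes
 * from the next one, with offset reset to 0 after the first iteration.
 */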

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
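/*
 * With 4 KiB pages and 8-byte pointers, for example, this permits
 * 2 * 4096 / 8 = 1024 struct page pointers per pinning loop, i.e. up to
 * 4 MiB of the remote address space per iteration.
 */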

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success or error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
                                    unsigned long len,
                                    struct iov_iter *iter,
                                    struct page **process_pages,
                                    struct mm_struct *mm,
                                    struct task_struct *task,
                                    int vm_write)
{
        unsigned long pa = addr & PAGE_MASK;
        unsigned long start_offset = addr - pa;
        unsigned long nr_pages;
        ssize_t rc = 0;
        unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
                / sizeof(struct page *);
        unsigned int flags = 0;

        /* Work out address and page range required */
        if (len == 0)
                return 0;
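        /*
         * Example with 4 KiB pages: addr = 0x1ff0 and len = 0x20 span two
         * pages, since (0x200f / 0x1000) - (0x1ff0 / 0x1000) + 1 = 2.
         */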
        nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

        if (vm_write)
                flags |= FOLL_WRITE;

        while (!rc && nr_pages && iov_iter_count(iter)) {
                int pinned_pages = min(nr_pages, max_pages_per_loop);
                int locked = 1;
                size_t bytes;

                /*
                 * Get the pages we're interested in.  We must
                 * access remotely because task/mm might not be
                 * current/current->mm
                 */
                mmap_read_lock(mm);
                pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
                                                     flags, process_pages,
                                                     NULL, &locked);
                if (locked)
                        mmap_read_unlock(mm);
                if (pinned_pages <= 0)
                        return -EFAULT;

                bytes = pinned_pages * PAGE_SIZE - start_offset;
                if (bytes > len)
                        bytes = len;

                rc = process_vm_rw_pages(process_pages,
                                         start_offset, bytes, iter,
                                         vm_write);
                len -= bytes;
                start_offset = 0;
                nr_pages -= pinned_pages;
                pa += pinned_pages * PAGE_SIZE;

                /* If vm_write is set, the pages need to be made dirty: */
                unpin_user_pages_dirty_lock(process_pages, pinned_pages,
                                            vm_write);
        }

        return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
                                  const struct iovec *rvec,
                                  unsigned long riovcnt,
                                  unsigned long flags, int vm_write)
{
        struct task_struct *task;
        struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
        struct page **process_pages = pp_stack;
        struct mm_struct *mm;
        unsigned long i;
        ssize_t rc = 0;
        unsigned long nr_pages = 0;
        unsigned long nr_pages_iov;
        ssize_t iov_len;
        size_t total_len = iov_iter_count(iter);

        /*
         * Work out how many struct page pointers we're going to need
         * when eventually calling pin_user_pages_remote()
         */
        for (i = 0; i < riovcnt; i++) {
                iov_len = rvec[i].iov_len;
                if (iov_len > 0) {
                        nr_pages_iov = ((unsigned long)rvec[i].iov_base
                                        + iov_len)
                                / PAGE_SIZE - (unsigned long)rvec[i].iov_base
                                / PAGE_SIZE + 1;
                        nr_pages = max(nr_pages, nr_pages_iov);
                }
        }

        if (nr_pages == 0)
                return 0;

        if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
                /* For reliability don't try to kmalloc more than
                   2 pages worth */
                process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
                                              sizeof(struct page *) * nr_pages),
                                        GFP_KERNEL);

                if (!process_pages)
                        return -ENOMEM;
        }

        /* Get process information */
        task = find_get_task_by_vpid(pid);
        if (!task) {
                rc = -ESRCH;
                goto free_proc_pages;
        }

        mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
        if (!mm || IS_ERR(mm)) {
                rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
                /*
                 * Explicitly map EACCES to EPERM as EPERM is a more
                 * appropriate error code for process_vm_readv/writev
                 */
                if (rc == -EACCES)
                        rc = -EPERM;
                goto put_task_struct;
        }

        for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
                rc = process_vm_rw_single_vec(
                        (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
                        iter, process_pages, mm, task, vm_write);

        /* copied = space before - space after */
        total_len -= iov_iter_count(iter);

        /* If we have managed to copy any data at all then
           we return the number of bytes copied. Otherwise
           we return the error code */
        if (total_len)
                rc = total_len;

        mmput(mm);

put_task_struct:
        put_task_struct(task);

free_proc_pages:
        if (process_pages != pp_stack)
                kfree(process_pages);
        return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
                             const struct iovec __user *lvec,
                             unsigned long liovcnt,
                             const struct iovec __user *rvec,
                             unsigned long riovcnt,
                             unsigned long flags, int vm_write)
{
        struct iovec iovstack_l[UIO_FASTIOV];
        struct iovec iovstack_r[UIO_FASTIOV];
        struct iovec *iov_l = iovstack_l;
        struct iovec *iov_r = iovstack_r;
        struct iov_iter iter;
        ssize_t rc;
        int dir = vm_write ? WRITE : READ;

        if (flags != 0)
                return -EINVAL;

        /* Check iovecs */
        rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
        if (rc < 0)
                return rc;
        if (!iov_iter_count(&iter))
                goto free_iov_l;
        iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r, false);
        if (IS_ERR(iov_r)) {
                rc = PTR_ERR(iov_r);
                goto free_iov_l;
        }
        rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
        if (iov_r != iovstack_r)
                kfree(iov_r);
free_iov_l:
        kfree(iov_l);
        return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
                unsigned long, liovcnt, const struct iovec __user *, rvec,
                unsigned long, riovcnt, unsigned long, flags)
{
        return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
                const struct iovec __user *, lvec,
                unsigned long, liovcnt, const struct iovec __user *, rvec,
                unsigned long, riovcnt, unsigned long, flags)
{
        return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
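
/*
 * A minimal userspace sketch (illustrative only, not part of this file)
 * of how the syscalls above are reached through the glibc process_vm_readv()
 * wrapper declared in <sys/uio.h>.  read_remote() and its parameters are
 * hypothetical names; the caller needs ptrace-attach permission on the
 * target, matching the mm_access(task, PTRACE_MODE_ATTACH_REALCREDS) check
 * in process_vm_rw_core() above.
 */
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t read_remote(pid_t pid, void *remote_addr,
                           void *local_buf, size_t len)
{
        struct iovec local  = { .iov_base = local_buf,   .iov_len = len };
        struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

        /* flags must be 0; on success the number of bytes copied is returned */
        return process_vm_readv(pid, &local, 1, &remote, 1, 0);
}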