// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write)
			copied = copy_page_from_iter(page, offset, copy, iter);
		else
			copied = copy_page_to_iter(page, offset, copy, iter);

		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
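
/*
 * For illustration: with 4 KiB pages and 8-byte pointers (a typical 64-bit
 * configuration), two pages hold 1024 struct page pointers, so each
 * pin/copy batch below covers at most 4 MiB of the remote address space.
 */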

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *	nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
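
	/*
	 * Worked example: with 4 KiB pages, addr = 0x1ffc and len = 8 touch
	 * the last 4 bytes of page 1 and the first 4 bytes of page 2, so
	 * (0x2003 / 0x1000) - (0x1ffc / 0x1000) + 1 = 2 pages.
	 */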

	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pinned_pages = min(nr_pages, max_pages_per_loop);
		int locked = 1;
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * access remotely because task/mm might not be
		 * current/current->mm
		 */
		mmap_read_lock(mm);
		pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
						     flags, process_pages,
						     NULL, &locked);
		if (locked)
			mmap_read_unlock(mm);
		if (pinned_pages <= 0)
			return -EFAULT;

		/* Do the copy for each page */
		bytes = pinned_pages * PAGE_SIZE - start_offset;
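		/*
		 * bytes spans the pinned pages minus the unused start of the
		 * first page; it is clamped to the remaining length below.
		 */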
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pinned_pages;
		pa += pinned_pages * PAGE_SIZE;

		/* If vm_write is set, the pages need to be made dirty: */
		unpin_user_pages_dirty_lock(process_pages, pinned_pages,
					    vm_write);
	}

	return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
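
/*
 * For scale: 16 pointers occupy 128 bytes on a 64-bit kernel, small enough
 * to keep on the stack; larger requests fall back to the kmalloc'd array in
 * process_vm_rw_core() below.
 */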

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}
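
	/*
	 * nr_pages is now the largest number of pages any single remote
	 * iovec spans; the same process_pages array is reused for each
	 * iovec in turn below.
	 */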

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *)*nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	task = find_get_task_by_vpid(pid);
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}
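
	/*
	 * mm_access() with PTRACE_MODE_ATTACH_REALCREDS succeeds only if the
	 * caller would be allowed to ptrace-attach to the target task, so
	 * these syscalls follow the same permission model as ptrace().
	 */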
	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code. May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? WRITE : READ;
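	/*
	 * For iov_iter purposes WRITE marks the local iovecs as the data
	 * source (process_vm_writev copies out of them into the remote
	 * task), while READ marks them as the destination.
	 */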

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iov_l;
	iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r, false);
	if (IS_ERR(iov_r)) {
		rc = PTR_ERR(iov_r);
		goto free_iov_l;
	}
	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
	if (iov_r != iovstack_r)
		kfree(iov_r);
free_iov_l:
	kfree(iov_l);
	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
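
/*
 * Illustrative userspace usage (not part of this file): reading a buffer
 * from another process with process_vm_readv().  The pid, remote_addr and
 * length below are placeholders.
 *
 *	#include <sys/uio.h>
 *
 *	char buf[4096];
 *	struct iovec local  = { .iov_base = buf,
 *				.iov_len  = sizeof(buf) };
 *	struct iovec remote = { .iov_base = (void *)remote_addr,
 *				.iov_len  = sizeof(buf) };
 *	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *	if (n < 0)
 *		perror("process_vm_readv");
 *
 * A return value smaller than the requested length means the copy stopped
 * part-way, e.g. because part of the remote range was not mapped.
 */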