/*
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
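
/*
 * Walk the page tables for a user address and return a pointer to its
 * pte.  On failure a small fake pointer value (< 0x1000) is returned;
 * the callers hand it to __handle_fault() as the DAT exception code.
 */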
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return (pte_t *) 0x3a;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return (pte_t *) 0x3b;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return (pte_t *) 0x10;

	return pte_offset_map(pmd, addr);
}
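
/*
 * Copy "n" bytes between the user address "uaddr" and the kernel buffer
 * "kptr" while holding mm->page_table_lock; write_user selects the copy
 * direction.  A missing or write-protected page drops the lock, lets
 * __handle_fault() resolve the fault and retries.  Returns the number
 * of bytes that could not be copied.
 */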
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		} else if (write_user && !pte_write(*pte)) {
			pte = (pte_t *) 0x04;
			goto fault;
		}

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn;
	pte_t *pte;
	int rc;

retry:
	pte = follow_table(mm, uaddr);
	if ((unsigned long) pte < 0x1000)
		goto fault;
	if (!pte_present(*pte)) {
		pte = (pte_t *) 0x11;
		goto fault;
	}

	pfn = pte_pfn(*pte);
	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}
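
/*
 * copy_from_user variant that walks the page tables instead of using a
 * hardware-assisted copy.  On a partial copy the uncopied tail of the
 * kernel buffer is zero-filled.
 */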
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}
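
/* copy_to_user counterpart: write "n" bytes from the kernel to user space. */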
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
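
/*
 * Clear "n" bytes of user memory, one page at a time, by copying from
 * empty_zero_page.  Returns the number of bytes not cleared.
 */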
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
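
/*
 * Find the length of a user space string, limited to "count" bytes, by
 * scanning it page-wise through the page table walk.  Like the other
 * strnlen_user variants the result includes the terminating NUL; 0 is
 * returned on fault.
 */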
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		}

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE-1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return (done < count) ? done + 1 : count;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, 0))
		return 0;
	goto retry;
}
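
/*
 * Copy a NUL terminated string from user space.  The length is bounded
 * first with strnlen_user_pt(), then the bytes are copied with
 * __user_copy_pt().  Returns the number of bytes copied (excluding a
 * terminating NUL if one was found) or -EFAULT.
 */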
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}
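
/*
 * Copy directly between two user space addresses.  Source and destination
 * page must be mapped at the same time, so the copy size per iteration is
 * limited by the larger of the two intra-page offsets.
 */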
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pte_t *pte_from, *pte_to;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		pte_from = follow_table(mm, uaddr_from);
		error_code = (unsigned long) pte_from;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_from)) {
			error_code = 0x11;
			goto fault;
		}

		write_user = 1;
		uaddr = uaddr_to;
		pte_to = follow_table(mm, uaddr_to);
		error_code = (unsigned long) pte_to;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_to)) {
			error_code = 0x11;
			goto fault;
		} else if (!pte_write(*pte_to)) {
			error_code = 0x04;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		pfn_to = pte_pfn(*pte_to);
		offset_from = uaddr_from & (PAGE_SIZE-1);
		offset_to = uaddr_to & (PAGE_SIZE-1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, error_code, write_user))
		return n - done;
	goto retry;
}
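
/*
 * Futex operation primitive: load the old value, apply "insn" to compute
 * the new value and store it back with compare-and-swap, looping until
 * the swap succeeds.  A faulting access branches to the end of the asm
 * via the exception table entries and leaves -EFAULT in "ret".
 */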
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l %1,0(%6)\n"					\
		     "1: " insn						\
		     "2: cs %1,%2,0(%6)\n"				\
		     "3: jl 1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );
static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}
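
/*
 * Futex operation on a user address: translate the address with
 * __dat_user_addr() under mm->page_table_lock, pin the page with
 * get_page() so it cannot go away, run the operation and drop the
 * reference again.
 */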
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}
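
/*
 * Atomically compare-and-exchange the futex value at "uaddr"; the value
 * read from user space is returned in *uval.
 */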
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs %1,%4,0(%5)\n"
		     "1: la %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}
int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}
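
/*
 * Operations table exported as the page-table-walk uaccess backend.
 * The "small" copy variants have no optimized implementation here and
 * simply reuse the generic ones.
 */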
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};
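
/*
 * Illustrative sketch, not part of this file: architecture setup code is
 * expected to install one uaccess_ops backend at boot.  The helper name,
 * the "uaccess" global and the has_hw_copy() predicate below are
 * hypothetical and only show the intended wiring:
 *
 *	extern struct uaccess_ops uaccess;
 *
 *	static void __init setup_uaccess(void)
 *	{
 *		if (!has_hw_copy())
 *			memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
 *	}
 */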