/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT      0x40000000UL
#else
#define __UA_LIMIT      0x80000000UL
#endif

#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"
#define __UA_t0         "$8"
#define __UA_t1         "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT      __ua_limit

#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"
#define __UA_t0         "$12"
#define __UA_t1         "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
#define USER_DS         ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { __UA_LIMIT })
#endif

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

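/*
 * Example (illustrative sketch, not part of this header): the classic
 * save/restore pattern for temporarily widening the address limit so
 * the uaccess routines also accept kernel pointers.  The function name
 * is hypothetical.
 *
 *      static void hypothetical_kernel_access(void)
 *      {
 *              mm_segment_t old_fs = get_fs();
 *
 *              set_fs(KERNEL_DS);
 *              // uaccess calls now pass the address-range checks
 *              // for kernel addresses
 *              set_fs(old_fs);
 *      }
 */
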
/*
 * eva_kernel_access() - determine whether a memory access is a kernel
 * access on an EVA system
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true if a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
        if (!IS_ENABLED(CONFIG_EVA))
                return false;

        return uaccess_kernel();
}

/*
 * Is an address valid? This does a straightforward calculation rather
 * than a series of tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)                                                 \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

static inline int __access_ok(const void __user *p, unsigned long size)
{
        unsigned long addr = (unsigned long)p;

        return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
}

#define access_ok(addr, size)                                   \
        likely(__access_ok((addr), (size)))

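/*
 * Example (illustrative sketch, not part of this header): a typical
 * range check before a series of raw accesses.  access_ok() only
 * validates the range against the segment limit; the accesses
 * themselves may still fault and return -EFAULT.
 *
 *      if (!access_ok(ubuf, len))
 *              return -EFAULT;
 *      // ubuf is in range, but every access below must still go
 *      // through the uaccess helpers
 */
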
/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))

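/*
 * Example (illustrative sketch, not part of this header): reading and
 * updating a single int in user memory with the checking variants.
 * The function name is hypothetical.
 *
 *      static int hypothetical_increment(int __user *uptr)
 *      {
 *              int val;
 *
 *              if (get_user(val, uptr))
 *                      return -EFAULT;
 *              return put_user(val + 1, uptr);
 *      }
 */
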
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

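/*
 * Example (illustrative sketch, not part of this header): once a range
 * has been validated, the unchecked variants avoid repeating the
 * access_ok() test per element.  Names are hypothetical and n is
 * assumed small and already validated by the caller.
 *
 *      static int hypothetical_sum(const int __user *uarr, int n, int *res)
 *      {
 *              int i, v, sum = 0;
 *
 *              if (!access_ok(uarr, sizeof(*uarr) * n))
 *                      return -EFAULT;
 *              for (i = 0; i < n; i++) {
 *                      if (__get_user(v, uarr + i))
 *                              return -EFAULT;
 *                      sum += v;
 *              }
 *              *res = sum;
 *              return 0;
 *      }
 */
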
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd                  _loadw
#else
#define _loadd(reg, addr)       "ld " reg ", " addr
#endif
#define _loadw(reg, addr)       "lw " reg ", " addr
#define _loadh(reg, addr)       "lh " reg ", " addr
#define _loadb(reg, addr)       "lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)                             \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, _loadb, ptr); break;                \
        case 2: __get_data_asm(val, _loadh, ptr); break;                \
        case 4: __get_data_asm(val, _loadw, ptr); break;                \
        case 8: __GET_DW(val, _loadd, ptr); break;                      \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)                               \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, user_lb, ptr); break;               \
        case 2: __get_data_asm(val, user_lh, ptr); break;               \
        case 4: __get_data_asm(val, user_lw, ptr); break;               \
        case 8: __GET_DW(val, user_ld, ptr); break;                     \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        if (eva_kernel_access()) {                                      \
                __get_kernel_common((x), size, ptr);                    \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __get_user_common((x), size, ptr);                      \
        }                                                               \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);              \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(__gu_ptr, size))) {                        \
                if (eva_kernel_access())                                \
                        __get_kernel_common((x), size, __gu_ptr);       \
                else                                                    \
                        __get_user_common((x), size, __gu_ptr);         \
        } else                                                          \
                (x) = 0;                                                \
                                                                        \
        __gu_err;                                                       \
})

#define __get_data_asm(val, insn, addr)                                 \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     "insn("%1", "%3")"                              \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

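/*
 * How the fixup above works (background note): the __ex_table section
 * collects entries pairing the address of the possibly-faulting load at
 * label 1 with the fixup code at label 3.  On a bad access the fault
 * handler searches this table and, on a match, resumes at the fixup,
 * which sets the error code and zeroes the destination.  The entry
 * layout is declared in asm/extable.h:
 *
 *      struct exception_table_entry {
 *              unsigned long insn;     // faulting instruction
 *              unsigned long nextinsn; // fixup code to branch to
 *      };
 */
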
/*
 * Get a 64-bit long long using 32-bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)                            \
{                                                                       \
        union {                                                         \
                unsigned long long      l;                              \
                __typeof__(*(addr))     t;                              \
        } __gu_tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn("%1", "(%3)")"                           \n"     \
        "2:     " insn("%D1", "4(%3)")"                         \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
                                                                        \
        (val) = __gu_tmp.t;                                             \
}

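/*
 * Illustrative C-level sketch (an assumption mirroring the union trick
 * above, fault handling omitted): the 64-bit value is assembled from
 * two 32-bit loads into a register pair (%1 and its counterpart %D1).
 * Which load fills which half depends on the endianness the kernel was
 * built for.
 *
 *      static unsigned long long read_u64_as_two_words(const unsigned int *p)
 *      {
 *              union {
 *                      unsigned long long l;
 *                      unsigned int       w[2];
 *              } tmp;
 *
 *              tmp.w[0] = p[0];        // first 32-bit load  (1: above)
 *              tmp.w[1] = p[1];        // second 32-bit load (2: above)
 *              return tmp.l;
 *      }
 */
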
#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored                 _storew
#else
#define _stored(reg, addr)      "sd " reg ", " addr
#endif

#define _storew(reg, addr)      "sw " reg ", " addr
#define _storeh(reg, addr)      "sh " reg ", " addr
#define _storeb(reg, addr)      "sb " reg ", " addr

#define __put_kernel_common(ptr, size)                                  \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(_storeb, ptr); break;                    \
        case 2: __put_data_asm(_storeh, ptr); break;                    \
        case 4: __put_data_asm(_storew, ptr); break;                    \
        case 8: __PUT_DW(_stored, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)                                    \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(user_sb, ptr); break;                    \
        case 2: __put_data_asm(user_sh, ptr); break;                    \
        case 4: __put_data_asm(user_sw, ptr); break;                    \
        case 8: __PUT_DW(user_sd, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        if (eva_kernel_access()) {                                      \
                __put_kernel_common(ptr, size);                         \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __put_user_common(ptr, size);                           \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(__pu_addr, size))) {                       \
                if (eva_kernel_access())                                \
                        __put_kernel_common(__pu_addr, size);           \
                else                                                    \
                        __put_user_common(__pu_addr, size);             \
        }                                                               \
                                                                        \
        __pu_err;                                                       \
})

#define __put_data_asm(insn, ptr)                                       \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_data_asm_ll32(insn, ptr)                                  \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
        "2:     "insn("%D2", "4(%3)")"                          \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unknown(void);

/*
 * We're generating jumps to subroutines which may be outside the range
 * of the jal instruction, so for modules we go through a register with
 * jalr instead.
 */
#ifdef MODULE
#define __MODULE_JAL(destination)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination)                                       \
        "jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
                                              defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from(func, to, from, n)                           \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(func)                                              \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_to(func, to, from, n)                             \
({                                                                      \
        register void __user *__cu_to_r __asm__("$4");                  \
        register const void *__cu_from_r __asm__("$5");                 \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(func)                                              \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_kernel(to, from, n)                          \
        __invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_kernel(to, from, n)                            \
        __invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_kernel(to, from, n)                           \
        __invoke_copy_from(__copy_user, to, from, n)

#ifndef CONFIG_EVA
#define __invoke_copy_from_user(to, from, n)                            \
        __invoke_copy_from(__copy_user, to, from, n)

#define __invoke_copy_to_user(to, from, n)                              \
        __invoke_copy_to(__copy_user, to, from, n)

#define ___invoke_copy_in_user(to, from, n)                             \
        __invoke_copy_from(__copy_user, to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_from_user_eva(void *__to, const void *__from,
                                   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
                                 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

/*
 * Source or destination address is in userland. We need to go through
 * the TLB.
 */
#define __invoke_copy_from_user(to, from, n)                            \
        __invoke_copy_from(__copy_from_user_eva, to, from, n)

#define __invoke_copy_to_user(to, from, n)                              \
        __invoke_copy_to(__copy_to_user_eva, to, from, n)

#define ___invoke_copy_in_user(to, from, n)                             \
        __invoke_copy_from(__copy_in_user_eva, to, from, n)

#endif /* CONFIG_EVA */

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (eva_kernel_access())
                return __invoke_copy_to_kernel(to, from, n);
        else
                return __invoke_copy_to_user(to, from, n);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (eva_kernel_access())
                return __invoke_copy_from_kernel(to, from, n);
        else
                return __invoke_copy_from_user(to, from, n);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (eva_kernel_access())
                return ___invoke_copy_in_kernel(to, from, n);
        else
                return ___invoke_copy_in_user(to, from, n);
}

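/*
 * Example (illustrative sketch, not part of this header): the
 * raw_copy_*() routines above are the arch backends for the generic
 * copy_from_user()/copy_to_user() wrappers in linux/uaccess.h.  A
 * typical bulk-copy caller looks like this; the struct and function
 * names are hypothetical.
 *
 *      static int hypothetical_get_cfg(struct demo_cfg *cfg,
 *                                      const void __user *uptr)
 *      {
 *              if (copy_from_user(cfg, uptr, sizeof(*cfg)))
 *                      return -EFAULT; // some bytes were not copyable
 *              return 0;
 *      }
 */
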
extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
        __kernel_size_t res;

#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */

        if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, $0\n\t"
                        "move\t$6, %2\n\t"
                        __MODULE_JAL(__bzero_kernel)
                        "move\t%0, $6"
                        : "=r" (res)
                        : "r" (addr), "r" (size)
                        : bzero_clobbers);
        } else {
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, $0\n\t"
                        "move\t$6, %2\n\t"
                        __MODULE_JAL(__bzero)
                        "move\t%0, $6"
                        : "=r" (res)
                        : "r" (addr), "r" (size)
                        : bzero_clobbers);
        }

        return res;
}

#define clear_user(addr, n)                                             \
({                                                                      \
        void __user *__cl_addr = (addr);                                \
        unsigned long __cl_size = (n);                                  \
                                                                        \
        if (__cl_size && access_ok(__cl_addr, __cl_size))               \
                __cl_size = __clear_user(__cl_addr, __cl_size);         \
        __cl_size;                                                      \
})

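/*
 * Example (illustrative sketch, not part of this header): padding the
 * unused tail of a user buffer with zeroes after a partial fill.
 *
 *      // copied 'len' bytes into ubuf, zero the remaining tail
 *      if (clear_user(ubuf + len, size - len))
 *              return -EFAULT;
 */
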
extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
        long res;

        if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        "move\t$6, %3\n\t"
                        __MODULE_JAL(__strncpy_from_kernel_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (__to), "r" (__from), "r" (__len)
                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
        } else {
                might_fault();
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        "move\t$6, %3\n\t"
                        __MODULE_JAL(__strncpy_from_user_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (__to), "r" (__from), "r" (__len)
                        : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
        }

        return res;
}

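/*
 * Example (illustrative sketch, not part of this header): fetching a
 * bounded name string from userspace.  A return value equal to the
 * buffer size means the string did not fit (no NUL within bounds).
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;             // -EFAULT
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;   // truncated, not NUL terminated
 */
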
extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);

/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum number of bytes to examine.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
        long res;

        might_fault();
        if (eva_kernel_access()) {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        __MODULE_JAL(__strnlen_kernel_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s), "r" (n)
                        : "$2", "$4", "$5", __UA_t0, "$31");
        } else {
                __asm__ __volatile__(
                        "move\t$4, %1\n\t"
                        "move\t$5, %2\n\t"
                        __MODULE_JAL(__strnlen_user_asm)
                        "move\t%0, $2"
                        : "=r" (res)
                        : "r" (s), "r" (n)
                        : "$2", "$4", "$5", __UA_t0, "$31");
        }

        return res;
}

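/*
 * Example (illustrative sketch, not part of this header): validating a
 * user string's length before copying it.  MAXLEN is hypothetical.
 *
 *      long len = strnlen_user(ustr, MAXLEN);
 *
 *      if (len == 0)
 *              return -EFAULT;         // faulted while scanning
 *      if (len > MAXLEN)
 *              return -EINVAL;         // no NUL within MAXLEN bytes
 *      // len includes the terminating NUL
 */
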
#endif /* _ASM_UACCESS_H */