Merge tag 'powerpc-5.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
[linux-2.6-microblaze.git] / arch / powerpc / include / asm / uaccess.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ARCH_POWERPC_UACCESS_H
3 #define _ARCH_POWERPC_UACCESS_H
4
5 #include <asm/ppc_asm.h>
6 #include <asm/processor.h>
7 #include <asm/page.h>
8 #include <asm/extable.h>
9 #include <asm/kup.h>
10
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

/* Wrap a raw limit value in the mm_segment_t struct type. */
#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

/* Kernel "segment": limit ~0UL, so every address passes __access_ok(). */
#define KERNEL_DS       MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS         MAKE_MM_SEG(TASK_SIZE - 1)
#endif

/* Current task's address-space limit, stored in its thread_struct. */
#define get_fs()        (current->thread.addr_limit)
33
/*
 * Switch the current task's address-space limit (typically between
 * USER_DS and KERNEL_DS).  Setting TIF_FSCHECK makes the
 * return-to-userspace path verify the limit was restored correctly.
 */
static inline void set_fs(mm_segment_t fs)
{
        current->thread.addr_limit = fs;
        /* On user-mode return check addr_limit (fs) is correct */
        set_thread_flag(TIF_FSCHECK);
}
40
/* True when the kernel "segment" is active (address checks bypassed). */
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
/* Highest address the current task may legally access. */
#define user_addr_max() (get_fs().seg)
43
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 * Checking addr and size independently (rather than addr + size)
 * avoids any overflow in the sum.
 */
#define __access_ok(addr, size, segment)        \
        (((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

/*
 * 32-bit check: the range [addr, addr + size - 1] must stay within the
 * segment limit.  Written as "size - 1 <= seg - addr" so the addition
 * cannot wrap; a zero size is always accepted once addr itself is in
 * range.
 */
static inline int __access_ok(unsigned long addr, unsigned long size,
                        mm_segment_t seg)
{
        if (addr > seg.seg)
                return 0;
        return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif
63
/*
 * Validate a user pointer/length pair against the current segment
 * limit.  __chk_user_ptr() is a compile-time (sparse) annotation check
 * only; the runtime check is __access_ok().
 */
#define access_ok(addr, size)           \
        (__chk_user_ptr(addr),          \
         __access_ok((__force unsigned long)(addr), (size), get_fs()))
67
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
/* Checked variants: access_ok() is performed on the pointer first. */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/* Unchecked variants: the caller must have done access_ok() already. */
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/* Unchecked put that branches to 'label' on a fault (no error code). */
#define __put_user_goto(x, ptr, label) \
        __put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

/* Unchecked get with the user-access window already open (do_allow=false). */
#define __get_user_allowed(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

/* Atomic-context variants: no might_fault()/sleep annotations. */
#define __get_user_inatomic(x, ptr) \
        __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
        __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
106
#ifdef CONFIG_PPC64

/*
 * Fetch one (possibly prefixed) instruction at 'ptr' into 'dest'
 * (a struct ppc_inst) using the given get_user-style accessor 'gu_op'.
 * The first 32-bit word is read; if its major opcode (top 6 bits) is
 * OP_PREFIX, the following suffix word is read as well and the pair is
 * combined with ppc_inst_prefix().  'dest' is only written when every
 * read succeeded.  Evaluates to 0 on success or the accessor's error.
 */
#define ___get_user_instr(gu_op, dest, ptr)                             \
({                                                                      \
        long __gui_ret = 0;                                             \
        unsigned long __gui_ptr = (unsigned long)ptr;                   \
        struct ppc_inst __gui_inst;                                     \
        unsigned int __prefix, __suffix;                                \
        __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);  \
        if (__gui_ret == 0) {                                           \
                if ((__prefix >> 26) == OP_PREFIX) {                    \
                        __gui_ret = gu_op(__suffix,                     \
                                (unsigned int __user *)__gui_ptr + 1);  \
                        __gui_inst = ppc_inst_prefix(__prefix,          \
                                                     __suffix);         \
                } else {                                                \
                        __gui_inst = ppc_inst(__prefix);                \
                }                                                       \
                if (__gui_ret == 0)                                     \
                        (dest) = __gui_inst;                            \
        }                                                               \
        __gui_ret;                                                      \
})

/* Checked instruction fetch (access_ok() via get_user). */
#define get_user_instr(x, ptr) \
        ___get_user_instr(get_user, x, ptr)

/* Unchecked instruction fetch (caller did access_ok()). */
#define __get_user_instr(x, ptr) \
        ___get_user_instr(__get_user, x, ptr)

/* Unchecked instruction fetch usable in atomic context. */
#define __get_user_instr_inatomic(x, ptr) \
        ___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
/* Without prefixed instructions, an instruction is one 32-bit word. */
#define get_user_instr(x, ptr) \
        get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
        __get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
        __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */
151
/* Deliberately undefined; referencing it breaks the link on bad sizes. */
extern long __put_user_bad(void);

/*
 * Store 'x' (of 'size' bytes) at user address 'ptr', assuming the
 * user-access window is already open.  Sets 'retval' to 0 on success
 * or -EFAULT on a fault.  Built on the asm-goto primitive with a local
 * label; the 'break' leaves the do/while and skips the failure path.
 */
#define __put_user_size_allowed(x, ptr, size, retval)           \
do {                                                            \
        __label__ __pu_failed;                                  \
                                                                \
        retval = 0;                                             \
        __put_user_size_goto(x, ptr, size, __pu_failed);        \
        break;                                                  \
                                                                \
__pu_failed:                                                    \
        retval = -EFAULT;                                       \
} while (0)
165
/*
 * As __put_user_size_allowed(), but opens/closes the user write-access
 * window (KUAP) around the store.
 */
#define __put_user_size(x, ptr, size, retval)                   \
do {                                                            \
        allow_write_to_user(ptr, size);                         \
        __put_user_size_allowed(x, ptr, size, retval);          \
        prevent_write_to_user(ptr, size);                       \
} while (0)
172
/*
 * Store 'x' at user address 'ptr' without an access_ok() check;
 * evaluates to 0 or -EFAULT.  might_fault() (a sleep/fault debug
 * annotation) is only issued for non-kernel addresses.
 */
#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        __typeof__(*(ptr)) __pu_val = (x);                      \
        __typeof__(size) __pu_size = (size);                    \
                                                                \
        if (!is_kernel_addr((unsigned long)__pu_addr))          \
                might_fault();                                  \
        __chk_user_ptr(__pu_addr);                              \
        __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);      \
                                                                \
        __pu_err;                                               \
})
187
/*
 * Checked store: validates the pointer with access_ok() first and
 * evaluates to -EFAULT (without touching memory) when the check fails,
 * otherwise to the result of the store.
 */
#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __typeof__(size) __pu_size = (size);                            \
                                                                        \
        might_fault();                                                  \
        if (access_ok(__pu_addr, __pu_size))                            \
                __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
                                                                        \
        __pu_err;                                                       \
})
201
/*
 * Unchecked store for atomic context: like __put_user_nocheck() but
 * with no might_fault() annotation at all.
 */
#define __put_user_nosleep(x, ptr, size)                        \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        __typeof__(*(ptr)) __pu_val = (x);                      \
        __typeof__(size) __pu_size = (size);                    \
                                                                \
        __chk_user_ptr(__pu_addr);                              \
        __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
                                                                \
        __pu_err;                                               \
})
214
215
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/*
 * Single store instruction with an exception-table entry: a fault
 * branches straight to 'label' (asm goto), so no error register is
 * needed.  The "m<>" constraint permits update-form addressing
 * (%U1), and %X1 selects an indexed form where required.
 */
#define __put_user_asm_goto(x, addr, label, op)                 \
        asm volatile goto(                                      \
                "1:     " op "%U1%X1 %0,%1      # put_user\n"   \
                EX_TABLE(1b, %l2)                               \
                :                                               \
                : "r" (x), "m<>" (*addr)                                \
                :                                               \
                : label)

#ifdef __powerpc64__
/* 64-bit: one std covers the 8-byte case. */
#define __put_user_asm2_goto(x, ptr, label)                     \
        __put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
/*
 * 32-bit: an 8-byte value takes two stw, each with its own exception
 * table entry.  %L0/%L1 name the second half of the register pair and
 * the following memory word.
 */
#define __put_user_asm2_goto(x, addr, label)                    \
        asm volatile goto(                                      \
                "1:     stw%X1 %0, %1\n"                        \
                "2:     stw%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
                EX_TABLE(2b, %l2)                               \
                :                                               \
                : "r" (x), "m" (*addr)                          \
                :                                               \
                : label)
#endif /* __powerpc64__ */
245
/*
 * Dispatch a user store by size to the matching asm primitive,
 * branching to 'label' on a fault.  Any size other than 1/2/4/8
 * trips a link-time error via __put_user_bad().
 */
#define __put_user_size_goto(x, ptr, size, label)               \
do {                                                            \
        switch (size) {                                         \
        case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;       \
        case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;       \
        case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;       \
        case 8: __put_user_asm2_goto(x, ptr, label); break;     \
        default: __put_user_bad();                              \
        }                                                       \
} while (0)

/*
 * Unchecked goto-style put: no access_ok(), no KUAP handling here —
 * used inside user_access_begin() sections (see unsafe_put_user()).
 * might_fault() is only issued for non-kernel addresses.
 */
#define __put_user_nocheck_goto(x, ptr, size, label)            \
do {                                                            \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        if (!is_kernel_addr((unsigned long)__pu_addr))          \
                might_fault();                                  \
        __chk_user_ptr(ptr);                                    \
        __put_user_size_goto((x), __pu_addr, (size), label);    \
} while (0)
265
266
/* Deliberately undefined; referencing it breaks the link on bad sizes. */
extern long __get_user_bad(void);

/*
 * Atomic 128-bit (16-byte) aligned copy from userspace to kernel via
 * VMX register 0 (lvx/stvx are 128-bit vector load/store).
 * Up to the caller to do enable_kernel_vmx() before calling!
 * On a fault the fixup sets err = -EFAULT and skips the kernel store.
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)                \
        __asm__ __volatile__(                           \
                "1:     lvx  0,0,%1     # get user\n"   \
                "       stvx 0,0,%2     # put kernel\n" \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err)                    \
                : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
285
/*
 * Single load with an exception-table entry.  On a fault the fixup
 * sets err = -EFAULT and zeroes the destination register before
 * resuming after the load, so the caller never sees stale data.
 */
#define __get_user_asm(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op"%U2%X2 %1, %2       # get_user\n"   \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err), "=r" (x)                  \
                : "m<>" (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
/* 64-bit: a single ld handles the 8-byte case. */
#define __get_user_asm2(x, addr, err)                   \
        __get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
/*
 * 32-bit: two lwz for the 8-byte case, both halves zeroed on fault.
 * "%1+1" is assembler arithmetic naming the second register of the
 * pair holding the 64-bit value; "=&r" (early clobber) keeps that
 * pair distinct from the input operands.
 */
#define __get_user_asm2(x, addr, err)                   \
        __asm__ __volatile__(                           \
                "1:     lwz%X2 %1, %2\n"                        \
                "2:     lwz%X2 %L1, %L2\n"              \
                "3:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "4:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       li %1+1,0\n"                    \
                "       b 3b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 4b)                        \
                EX_TABLE(2b, 4b)                        \
                : "=r" (err), "=&r" (x)                 \
                : "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
319
/*
 * Dispatch a user load by size to the matching asm primitive; 'retval'
 * gets 0 or -EFAULT.  A size larger than the destination, or one that
 * is not 1/2/4/8, trips a link-time error via __get_user_bad().
 * Assumes the user-access window is already open.
 */
#define __get_user_size_allowed(x, ptr, size, retval)           \
do {                                                            \
        retval = 0;                                             \
        __chk_user_ptr(ptr);                                    \
        if (size > sizeof(x))                                   \
                (x) = __get_user_bad();                         \
        switch (size) {                                         \
        case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;      \
        case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;     \
        case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;     \
        case 8: __get_user_asm2(x, (u64 __user *)ptr, retval);  break;  \
        default: (x) = __get_user_bad();                        \
        }                                                       \
} while (0)

/*
 * As above, but opens/closes the user read-access window (KUAP)
 * around the load.
 */
#define __get_user_size(x, ptr, size, retval)                   \
do {                                                            \
        allow_read_from_user(ptr, size);                        \
        __get_user_size_allowed(x, ptr, size, retval);          \
        prevent_read_from_user(ptr, size);                      \
} while (0)
341
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.  Used for the
 * intermediate __gu_val so the asm always gets a full-width integer
 * destination regardless of the user type's size.
 */
#define __long_type(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
348
/*
 * Read sizeof(*ptr) bytes from user address 'ptr' into 'x' without an
 * access_ok() check; evaluates to 0 or -EFAULT.  barrier_nospec()
 * limits speculative loads from the (unchecked) user address.  When
 * 'do_allow' is false the caller has already opened the user-access
 * window, so the _allowed variant is used.  might_fault() is only
 * issued for non-kernel addresses.
 */
#define __get_user_nocheck(x, ptr, size, do_allow)                      \
({                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);   \
        __typeof__(size) __gu_size = (size);                    \
                                                                \
        __chk_user_ptr(__gu_addr);                              \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
        barrier_nospec();                                       \
        if (do_allow)                                                           \
                __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);      \
        else                                                                    \
                __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
                                                                \
        __gu_err;                                               \
})
368
/*
 * Checked read: validates the pointer with access_ok() first.  The
 * barrier_nospec() after the check keeps the load from being executed
 * speculatively when the check would fail.  __gu_val starts at 0 so
 * 'x' is zeroed (not left stale) on failure; evaluates to 0 or -EFAULT.
 */
#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT;                                        \
        __long_type(*(ptr)) __gu_val = 0;                               \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        __typeof__(size) __gu_size = (size);                            \
                                                                        \
        might_fault();                                                  \
        if (access_ok(__gu_addr, __gu_size)) {                          \
                barrier_nospec();                                       \
                __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
        }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
                                                                        \
        __gu_err;                                                       \
})
385
/*
 * Unchecked read for atomic context: like __get_user_nocheck(true)
 * but with no might_fault() annotation.
 */
#define __get_user_nosleep(x, ptr, size)                        \
({                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);   \
        __typeof__(size) __gu_size = (size);                    \
                                                                \
        __chk_user_ptr(__gu_addr);                              \
        barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
                                                                \
        __gu_err;                                               \
})
400
401
/* more complex routines */

/* Low-level assembly copy; returns the number of bytes NOT copied. */
extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);

#ifdef CONFIG_ARCH_HAS_COPY_MC
/* Machine-check ("mc") tolerant copy; returns bytes not copied. */
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

/* Machine-check tolerant kernel-to-kernel copy. */
static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
        return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

/*
 * Machine-check tolerant copy to user space.  Returns the number of
 * bytes not copied; 'n' is returned unchanged when the size or
 * access_ok() checks fail.  Note that prevent_write_to_user() is
 * called with the residual count returned by copy_mc_generic().
 */
static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true))) {
                if (access_ok(to, n)) {
                        allow_write_to_user(to, n);
                        n = copy_mc_generic((void *)to, from, n);
                        prevent_write_to_user(to, n);
                }
        }

        return n;
}
#endif
432
#ifdef __powerpc64__
/*
 * 64-bit only: user-to-user copy.  Opens a combined read+write
 * user-access window around the copy; returns bytes not copied.
 * barrier_nospec() limits speculation past the caller's access checks.
 */
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        unsigned long ret;

        barrier_nospec();
        allow_read_write_user(to, from, n);
        ret = __copy_tofrom_user(to, from, n);
        prevent_read_write_user(to, from, n);
        return ret;
}
#endif /* __powerpc64__ */
446
/*
 * Copy 'n' bytes from user space to kernel space; returns the number
 * of bytes NOT copied (0 on success).  Compile-time-constant sizes of
 * 1/2/4/8 take an inline __get_user_size() fast path; other sizes —
 * or a fast path that faulted (ret != 0) — fall through to the full
 * __copy_tofrom_user() with the read window opened around it.
 * barrier_nospec() before each load limits speculation past the
 * caller's access check.
 */
static inline unsigned long raw_copy_from_user(void *to,
                const void __user *from, unsigned long n)
{
        unsigned long ret;
        if (__builtin_constant_p(n) && (n <= 8)) {
                ret = 1;

                switch (n) {
                case 1:
                        barrier_nospec();
                        __get_user_size(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
                        barrier_nospec();
                        __get_user_size(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
                        barrier_nospec();
                        __get_user_size(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
                        barrier_nospec();
                        __get_user_size(*(u64 *)to, from, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }

        barrier_nospec();
        allow_read_from_user(from, n);
        ret = __copy_tofrom_user((__force void __user *)to, from, n);
        prevent_read_from_user(from, n);
        return ret;
}
482
/*
 * Copy 'n' bytes from kernel to user space with the user write window
 * already open (hence "_allowed": no KUAP handling here).  Returns the
 * number of bytes not copied.  Constant sizes of 1/2/4/8 use the
 * inline store fast path; other sizes, or a faulting fast path
 * (ret != 0), fall back to __copy_tofrom_user().
 */
static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n) && (n <= 8)) {
                unsigned long ret = 1;

                switch (n) {
                case 1:
                        __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
                        break;
                case 2:
                        __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
                        break;
                case 4:
                        __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
                        break;
                case 8:
                        __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }

        return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
509
/*
 * Copy 'n' bytes from kernel to user space, opening/closing the user
 * write-access window around the copy.  Returns bytes not copied.
 */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret;

        allow_write_to_user(to, n);
        ret = raw_copy_to_user_allowed(to, from, n);
        prevent_write_to_user(to, n);
        return ret;
}
520
521 unsigned long __arch_clear_user(void __user *addr, unsigned long size);
522
523 static inline unsigned long clear_user(void __user *addr, unsigned long size)
524 {
525         unsigned long ret = size;
526         might_fault();
527         if (likely(access_ok(addr, size))) {
528                 allow_write_to_user(addr, size);
529                 ret = __arch_clear_user(addr, size);
530                 prevent_write_to_user(addr, size);
531         }
532         return ret;
533 }
534
/*
 * Nominally "unchecked" clear — but it simply forwards to the checked
 * clear_user(), so the access_ok() validation is performed here too.
 */
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
        return clear_user(addr, size);
}
539
/* User string helpers; implemented out of line (see their definitions
 * for exact return semantics). */
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

/* Copy variants that flush the destination from the data cache —
 * presumably for persistent-memory paths; see callers to confirm. */
extern long __copy_from_user_flushcache(void *dst, const void __user *src,
                unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                           size_t len);
547
/*
 * Validate 'ptr'/'len' and open a combined read+write user-access
 * window.  Returns false (window left closed) if access_ok() fails.
 * Must be paired with user_access_end() on success.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;
        allow_read_write_user((void __user *)ptr, ptr, len);
        return true;
}
#define user_access_begin       user_access_begin
#define user_access_end         prevent_current_access_user
/* Save/restore the user-access state across an exceptional path. */
#define user_access_save        prevent_user_access_return
#define user_access_restore     restore_user_access
559
/*
 * Validate 'ptr'/'len' and open a read-only user-access window.
 * Returns false if access_ok() fails; pair with user_read_access_end().
 */
static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;
        allow_read_from_user(ptr, len);
        return true;
}
#define user_read_access_begin  user_read_access_begin
#define user_read_access_end            prevent_current_read_from_user

/*
 * Validate 'ptr'/'len' and open a write-only user-access window.
 * Returns false if access_ok() fails; pair with user_write_access_end().
 */
static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;
        allow_write_to_user((void __user *)ptr, len);
        return true;
}
#define user_write_access_begin user_write_access_begin
#define user_write_access_end           prevent_current_write_to_user
581
/* Branch to 'err' when 'op' evaluates non-zero. */
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
/* Accessors for use inside a user_access_begin() section: the window
 * is already open, and a fault jumps to label 'e'. */
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)

/*
 * Copy 'l' bytes to user space inside a user_access_begin() section,
 * jumping to 'e' on a fault.  Copies long-sized chunks first, then
 * mops up a 4-byte (64-bit only), a 2-byte and a 1-byte remainder.
 */
#define unsafe_copy_to_user(d, s, l, e) \
do {                                                                    \
        u8 __user *_dst = (u8 __user *)(d);                             \
        const u8 *_src = (const u8 *)(s);                               \
        size_t _len = (l);                                              \
        int _i;                                                         \
                                                                        \
        for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))             \
                __put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
        if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {                   \
                __put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);      \
                _i += 4;                                                \
        }                                                               \
        if (_len & 2) {                                                 \
                __put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);      \
                _i += 2;                                                \
        }                                                               \
        if (_len & 1) \
                __put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
} while (0)
606
607 #endif  /* _ARCH_POWERPC_UACCESS_H */