/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX		TASK_SIZE_USER64
#endif

#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
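
/*
 * Example (illustrative only, assuming a hypothetical u32 __user *uptr):
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))		// checks access_ok() itself
 *		return -EFAULT;
 *
 *	if (!access_ok(uptr, 2 * sizeof(u32)))	// one check, two accesses
 *		return -EFAULT;
 *	if (__get_user(val, uptr) || __put_user(val + 1, uptr + 1))
 *		return -EFAULT;
 */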
#define __put_user(x, ptr)					\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x);	\
	__typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr));	\
								\
	might_fault();						\
	do {							\
		__label__ __pu_failed;				\
								\
		allow_write_to_user(__pu_addr, __pu_size);	\
		__put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed);	\
		prevent_write_to_user(__pu_addr, __pu_size);	\
		__pu_err = 0;					\
		break;						\
								\
__pu_failed:							\
		prevent_write_to_user(__pu_addr, __pu_size);	\
		__pu_err = -EFAULT;				\
	} while (0);						\
								\
	__pu_err;						\
})

#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_pu_addr = (ptr);			\
									\
	access_ok(_pu_addr, sizeof(*(ptr))) ?				\
		  __put_user(x, _pu_addr) : -EFAULT;			\
})

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m<>" (*addr)			\
		:						\
		: label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)			\
	asm_volatile_goto(					\
		"1:	stw%X1 %0, %1\n"			\
		"2:	stw%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label)			\
do {									\
	__typeof__(*(ptr)) __user *__pus_addr = (ptr);			\
									\
	switch (size) {							\
	case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, __pus_addr, label); break;		\
	default: BUILD_BUG();						\
	}								\
} while (0)
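
/*
 * Because "size" is a compile-time constant at every call site, the
 * switch above folds to a single store instruction plus one exception
 * table entry; any other size fails the build via BUILD_BUG().
 * Illustrative only (hypothetical u16 __user *p):
 *
 *	__label__ failed;
 *
 *	__put_user_size_goto(val, p, sizeof(*p), failed);	// one "sth"
 */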

/*
 * This does an atomic 128-bit (16-byte aligned) load from userspace.
 * It is up to the caller to do enable_kernel_altivec() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine altivec\n"			\
		"1:	lvx  0,0,%1	# get user\n"	\
		"	stvx 0,0,%2	# put kernel\n"	\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

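/*
 * Note the "0" (err) input operand: callers must initialize err to 0.
 * An illustrative call site (hypothetical kbuf/ubuf names):
 *
 *	int err = 0;
 *
 *	preempt_disable();
 *	enable_kernel_altivec();
 *	__get_user_atomic_128_aligned(kbuf, ubuf, err);
 *	preempt_enable();
 *	if (err)
 *		return err;
 */
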
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __get_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	"op"%U1%X1 %0, %1	# get_user\n"	\
		EX_TABLE(1b, %l2)				\
		: "=r" (x)					\
		: "m<>" (*addr)					\
		:						\
		: label)

#ifdef __powerpc64__
#define __get_user_asm2_goto(x, addr, label)			\
	__get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label)			\
	asm_volatile_goto(					\
		"1:	lwz%X1 %0, %1\n"			\
		"2:	lwz%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		: "=&r" (x)					\
		: "m" (*addr)					\
		:						\
		: label)
#endif /* __powerpc64__ */

#define __get_user_size_goto(x, ptr, size, label)				\
do {										\
	BUILD_BUG_ON(size > sizeof(x));						\
	switch (size) {								\
	case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break;	\
	case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break;	\
	case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break;	\
	case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label);  break;	\
	default: x = 0; BUILD_BUG();						\
	}									\
} while (0)

#define __get_user_size_allowed(x, ptr, size, retval)			\
do {									\
		__label__ __gus_failed;					\
									\
		__get_user_size_goto(x, ptr, size, __gus_failed);	\
		retval = 0;						\
		break;							\
__gus_failed:								\
		x = 0;							\
		retval = -EFAULT;					\
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define __get_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	"op"%U2%X2 %1, %2	# get_user\n"	\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	li %1,0\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err), "=r" (x)				\
		: "m<>" (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz%X2 %1, %2\n"		\
		"2:	lwz%X2 %L1, %L2\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)			\
do {									\
	retval = 0;							\
	BUILD_BUG_ON(size > sizeof(x));					\
	switch (size) {							\
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval);  break;	\
	default: x = 0; BUILD_BUG();					\
	}								\
} while (0)

#define __get_user_size_goto(x, ptr, size, label)		\
do {								\
	long __gus_retval;					\
								\
	__get_user_size_allowed(x, ptr, size, __gus_retval);	\
	if (__gus_retval)					\
		goto label;					\
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
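
/*
 * Both branches above provide the same two primitives with the same
 * contract: __get_user_size_goto() branches to the caller's label on a
 * fault, while __get_user_size_allowed() returns 0 or -EFAULT in
 * "retval" and zeroes "x" on failure; only the code generation differs.
 * Illustrative only (hypothetical uptr, user access already allowed):
 *
 *	long ret;
 *	u32 v;
 *
 *	__get_user_size_allowed(v, uptr, sizeof(v), ret);
 *	if (ret)		// v is 0 here
 *		return ret;
 */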

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
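
/*
 * For example (illustrative): on 32-bit, __long_type(*(u64 __user *)p)
 * is unsigned long long while __long_type(*(u8 __user *)p) is unsigned
 * long, so the __gu_val temporary below is always wide enough for an
 * 8-byte access without forcing 64-bit arithmetic on smaller ones.
 */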

#define __get_user(x, ptr)					\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr));	\
								\
	might_fault();						\
	allow_read_from_user(__gu_addr, __gu_size);		\
	__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	prevent_read_from_user(__gu_addr, __gu_size);		\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *_gu_addr = (ptr);			\
									\
	access_ok(_gu_addr, sizeof(*(ptr))) ?				\
		  __get_user(x, _gu_addr) :				\
		  ((x) = (__force __typeof__(*(ptr)))0, -EFAULT);	\
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
	prevent_write_to_user(to, n);
	return ret;
}
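
/*
 * All three raw copy routines above follow the same KUAP bracket:
 * allow_*_user() opens the user access window, __copy_tofrom_user()
 * does the work, prevent_*_user() closes the window, and the return
 * value is the number of bytes NOT copied (0 on full success).
 * Illustrative caller (the generic copy_from_user() adds the checks):
 *
 *	if (raw_copy_from_user(kbuf, ubuf, len))	// after access_ok()
 *		return -EFAULT;
 */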

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	might_fault();
	allow_write_to_user(addr, size);
	ret = __arch_clear_user(addr, size);
	prevent_write_to_user(addr, size);
	return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true)) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin	user_read_access_begin
#define user_read_access_end	prevent_current_read_from_user

static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end	prevent_current_write_to_user

#define unsafe_get_user(x, p, e) do {				\
	__long_type(*(p)) __gu_val;				\
	__typeof__(*(p)) __user *__gu_addr = (p);		\
								\
	__get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
	(x) = (__typeof__(*(p)))__gu_val;			\
} while (0)

#define unsafe_put_user(x, p, e) \
	__put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
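
/*
 * Illustrative use of the unsafe accessors (hypothetical uptr/lo/hi):
 * open the window once, do several accesses, close it on both paths.
 *
 *	if (!user_read_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(lo, uptr, Efault);
 *	unsafe_get_user(hi, uptr + 1, Efault);
 *	user_read_access_end();
 *	return 0;
 * Efault:
 *	user_read_access_end();
 *	return -EFAULT;
 */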

#define unsafe_copy_from_user(d, s, l, e) \
do {											\
	u8 *_dst = (u8 *)(d);								\
	const u8 __user *_src = (const u8 __user *)(s);					\
	size_t _len = (l);								\
	int _i;										\
											\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))		\
		unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e);	\
	if (_len & 4) {									\
		unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e);	\
		_i += 4;								\
	}										\
	if (_len & 2) {									\
		unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e);	\
		_i += 2;								\
	}										\
	if (_len & 1)									\
		unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e);	\
} while (0)

#define unsafe_copy_to_user(d, s, l, e) \
do {										\
	u8 __user *_dst = (u8 __user *)(d);					\
	const u8 *_src = (const u8 *)(s);					\
	size_t _len = (l);							\
	int _i;									\
										\
	for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))	\
		unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
	if (_len & 4) {								\
		unsafe_put_user(*(u32 *)(_src + _i), (u32 __user *)(_dst + _i), e); \
		_i += 4;							\
	}									\
	if (_len & 2) {								\
		unsafe_put_user(*(u16 *)(_src + _i), (u16 __user *)(_dst + _i), e); \
		_i += 2;							\
	}									\
	if (_len & 1)								\
		unsafe_put_user(*(u8 *)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)
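
/*
 * Both bulk helpers above chunk the transfer into 8-byte words plus a
 * 4-, 2- and 1-byte tail, so e.g. a 15-byte copy emits exactly four
 * accesses (illustrative, hypothetical udst/ksrc inside an open
 * user_write_access_begin() window):
 *
 *	unsafe_copy_to_user(udst, ksrc, 15, Efault);	// 8 + 4 + 2 + 1
 */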

#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size_goto(*((type *)(dst)),				\
		(__force type __user *)(src), sizeof(type), err_label)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size_goto(*((type *)(src)),				\
		(__force type __user *)(dst), sizeof(type), err_label)
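
/*
 * These back the generic get_kernel_nofault()/copy_from_kernel_nofault()
 * helpers: a kernel address is force-cast to __user so the same
 * exception table machinery catches faults. Illustrative use of the
 * generic wrapper (hypothetical kptr):
 *
 *	int v;
 *
 *	if (get_kernel_nofault(v, kptr))
 *		return -EFAULT;
 */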

#endif	/* _ARCH_POWERPC_UACCESS_H */