/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX           TASK_SIZE_USER64
#else
#define TASK_SIZE_MAX           TASK_SIZE
#endif

static inline bool __access_ok(unsigned long addr, unsigned long size)
{
        return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr;
}

#define access_ok(addr, size)           \
        (__chk_user_ptr(addr),          \
         __access_ok((unsigned long)(addr), (size)))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define __put_user(x, ptr)                                      \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        __typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x);  \
        __typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr));  \
                                                                \
        might_fault();                                          \
        do {                                                    \
                __label__ __pu_failed;                          \
                                                                \
                allow_write_to_user(__pu_addr, __pu_size);      \
                __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed);      \
                prevent_write_to_user(__pu_addr, __pu_size);    \
                __pu_err = 0;                                   \
                break;                                          \
                                                                \
__pu_failed:                                                    \
                prevent_write_to_user(__pu_addr, __pu_size);    \
                __pu_err = -EFAULT;                             \
        } while (0);                                            \
                                                                \
        __pu_err;                                               \
})

#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *_pu_addr = (ptr);                    \
                                                                        \
        access_ok(_pu_addr, sizeof(*(ptr))) ?                           \
                  __put_user(x, _pu_addr) : -EFAULT;                    \
})
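
/*
 * Illustrative usage sketch (not part of this header; uarg and val are
 * hypothetical names): copying a single value out to userspace, e.g.
 * from an ioctl handler. put_user() returns 0 on success, -EFAULT on a
 * bad address or fault.
 *
 *	int val = 42;
 *
 *	if (put_user(val, (int __user *)uarg))
 *		return -EFAULT;
 */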

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm_goto(x, addr, label, op)                 \
        asm_volatile_goto(                                      \
                "1:     " op "%U1%X1 %0,%1      # put_user\n"   \
                EX_TABLE(1b, %l2)                               \
                :                                               \
                : "r" (x), "m"UPD_CONSTR (*addr)                \
                :                                               \
                : label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)                     \
        __put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)                    \
        asm_volatile_goto(                                      \
                "1:     stw%X1 %0, %1\n"                        \
                "2:     stw%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
                EX_TABLE(2b, %l2)                               \
                :                                               \
                : "r" (x), "m" (*addr)                          \
                :                                               \
                : label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label)               \
do {                                                            \
        __typeof__(*(ptr)) __user *__pus_addr = (ptr);          \
                                                                \
        switch (size) {                                         \
        case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break;        \
        case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break;        \
        case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break;        \
        case 8: __put_user_asm2_goto(x, __pus_addr, label); break;              \
        default: BUILD_BUG();                                   \
        }                                                       \
} while (0)

/*
 * This does an atomic, 16-byte-aligned, 128-bit load from userspace.
 * It is up to the caller to enable the vector unit (via
 * enable_kernel_altivec()) before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)                \
        __asm__ __volatile__(                           \
                "1:     lvx  0,0,%1     # get user\n"   \
                "       stvx 0,0,%2     # put kernel\n" \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err)                    \
                : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

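/*
 * Hypothetical caller sketch (illustrative only): both buffers must be
 * 16-byte aligned, err must start at 0 (it is an in/out operand), and
 * the vector unit must be enabled around the access.
 *
 *	u8 kbuf[16] __aligned(16);
 *	int err = 0;
 *
 *	preempt_disable();
 *	enable_kernel_altivec();
 *	__get_user_atomic_128_aligned(kbuf, uaddr, err);
 *	preempt_enable();
 *	if (err)
 *		return err;
 */
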
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __get_user_asm_goto(x, addr, label, op)                 \
        asm_volatile_goto(                                      \
                "1:     "op"%U1%X1 %0, %1       # get_user\n"   \
                EX_TABLE(1b, %l2)                               \
                : "=r" (x)                                      \
                : "m"UPD_CONSTR (*addr)                         \
                :                                               \
                : label)

#ifdef __powerpc64__
#define __get_user_asm2_goto(x, addr, label)                    \
        __get_user_asm_goto(x, addr, label, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2_goto(x, addr, label)                    \
        asm_volatile_goto(                                      \
                "1:     lwz%X1 %0, %1\n"                        \
                "2:     lwz%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
                EX_TABLE(2b, %l2)                               \
                : "=&r" (x)                                     \
                : "m" (*addr)                                   \
                :                                               \
                : label)
#endif /* __powerpc64__ */

#define __get_user_size_goto(x, ptr, size, label)                               \
do {                                                                            \
        BUILD_BUG_ON(size > sizeof(x));                                         \
        switch (size) {                                                         \
        case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break;  \
        case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
        case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
        case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label);  break;      \
        default: x = 0; BUILD_BUG();                                            \
        }                                                                       \
} while (0)

#define __get_user_size_allowed(x, ptr, size, retval)                   \
do {                                                                    \
                __label__ __gus_failed;                                 \
                                                                        \
                __get_user_size_goto(x, ptr, size, __gus_failed);       \
                retval = 0;                                             \
                break;                                                  \
__gus_failed:                                                           \
                x = 0;                                                  \
                retval = -EFAULT;                                       \
} while (0)

#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

#define __get_user_asm(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op"%U2%X2 %1, %2       # get_user\n"   \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err), "=r" (x)                  \
                : "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)                   \
        __get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)                   \
        __asm__ __volatile__(                           \
                "1:     lwz%X2 %1, %2\n"                        \
                "2:     lwz%X2 %L1, %L2\n"              \
                "3:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "4:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       li %1+1,0\n"                    \
                "       b 3b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 4b)                        \
                EX_TABLE(2b, 4b)                        \
                : "=r" (err), "=&r" (x)                 \
                : "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)           \
do {                                                            \
        retval = 0;                                             \
        BUILD_BUG_ON(size > sizeof(x));                         \
        switch (size) {                                         \
        case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;      \
        case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;     \
        case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;     \
        case 8: __get_user_asm2(x, (u64 __user *)ptr, retval);  break;  \
        default: x = 0; BUILD_BUG();                            \
        }                                                       \
} while (0)

#define __get_user_size_goto(x, ptr, size, label)               \
do {                                                            \
        long __gus_retval;                                      \
                                                                \
        __get_user_size_allowed(x, ptr, size, __gus_retval);    \
        if (__gus_retval)                                       \
                goto label;                                     \
} while (0)

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
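
/*
 * For example, on 32-bit, __long_type(*(u64 __user *)p) is unsigned long
 * long, so the two-instruction lwz sequence above has a wide enough
 * destination, while __long_type(*(u8 __user *)p) stays unsigned long.
 */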

#define __get_user(x, ptr)                                      \
({                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        __typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr));  \
                                                                \
        might_fault();                                          \
        allow_read_from_user(__gu_addr, __gu_size);             \
        __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);      \
        prevent_read_from_user(__gu_addr, __gu_size);           \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
                                                                \
        __gu_err;                                               \
})

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *_gu_addr = (ptr);                    \
                                                                        \
        access_ok(_gu_addr, sizeof(*(ptr))) ?                           \
                  __get_user(x, _gu_addr) :                             \
                  ((x) = (__force __typeof__(*(ptr)))0, -EFAULT);       \
})
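
/*
 * Illustrative usage sketch (hypothetical names): reading a single value
 * from userspace. On failure get_user() zeroes x and returns -EFAULT.
 *
 *	u32 flags;
 *
 *	if (get_user(flags, (u32 __user *)uarg))
 *		return -EFAULT;
 */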

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        unsigned long ret;

        allow_read_write_user(to, from, n);
        ret = __copy_tofrom_user(to, from, n);
        prevent_read_write_user(to, from, n);
        return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
                const void __user *from, unsigned long n)
{
        unsigned long ret;

        allow_read_from_user(from, n);
        ret = __copy_tofrom_user((__force void __user *)to, from, n);
        prevent_read_from_user(from, n);
        return ret;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret;

        allow_write_to_user(to, n);
        ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
        prevent_write_to_user(to, n);
        return ret;
}
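
/*
 * These raw routines are the arch backends behind the generic
 * copy_to_user()/copy_from_user() wrappers in <linux/uaccess.h>, which
 * add the access_ok() and size checks. Typical caller sketch
 * (hypothetical names); the return value is the number of bytes NOT
 * copied:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */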

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
        unsigned long ret;

        might_fault();
        allow_write_to_user(addr, size);
        ret = __arch_clear_user(addr, size);
        prevent_write_to_user(addr, size);
        return ret;
}

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
        return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}
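
/*
 * Usage sketch (hypothetical names): clear_user() returns the number of
 * bytes that could NOT be zeroed, so nonzero means failure.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */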

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
        return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true))) {
                if (access_ok(to, n)) {
                        allow_write_to_user(to, n);
                        n = copy_mc_generic((void *)to, from, n);
                        prevent_write_to_user(to, n);
                }
        }

        return n;
}
#endif

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
                unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                           size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;

        might_fault();

        allow_read_write_user((void __user *)ptr, ptr, len);
        return true;
}
#define user_access_begin       user_access_begin
#define user_access_end         prevent_current_access_user
#define user_access_save        prevent_user_access_return
#define user_access_restore     restore_user_access

static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;

        might_fault();

        allow_read_from_user(ptr, len);
        return true;
}
#define user_read_access_begin  user_read_access_begin
#define user_read_access_end            prevent_current_read_from_user

static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;

        might_fault();

        allow_write_to_user((void __user *)ptr, len);
        return true;
}
#define user_write_access_begin user_write_access_begin
#define user_write_access_end           prevent_current_write_to_user

#define unsafe_get_user(x, p, e) do {                                   \
        __long_type(*(p)) __gu_val;                             \
        __typeof__(*(p)) __user *__gu_addr = (p);               \
                                                                \
        __get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
        (x) = (__typeof__(*(p)))__gu_val;                       \
} while (0)

#define unsafe_put_user(x, p, e) \
        __put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
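
/*
 * The unsafe_*() accessors are only valid between user_access_begin()
 * and user_access_end(); on a fault they branch to the supplied label.
 * A minimal sketch of the canonical pattern (hypothetical names):
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	unsafe_put_user(val + 1, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */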

#define unsafe_copy_from_user(d, s, l, e) \
do {                                                                                    \
        u8 *_dst = (u8 *)(d);                                                           \
        const u8 __user *_src = (const u8 __user *)(s);                                 \
        size_t _len = (l);                                                              \
        int _i;                                                                         \
                                                                                        \
        for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))               \
                unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e);     \
        if (_len & 4) {                                                                 \
                unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e);     \
                _i += 4;                                                                \
        }                                                                               \
        if (_len & 2) {                                                                 \
                unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e);     \
                _i += 2;                                                                \
        }                                                                               \
        if (_len & 1)                                                                   \
                unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e);       \
} while (0)

#define unsafe_copy_to_user(d, s, l, e) \
do {                                                                    \
        u8 __user *_dst = (u8 __user *)(d);                             \
        const u8 *_src = (const u8 *)(s);                               \
        size_t _len = (l);                                              \
        int _i;                                                         \
                                                                        \
        for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64))       \
                unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
        if (_len & 4) {                                                 \
                unsafe_put_user(*(u32 *)(_src + _i), (u32 __user *)(_dst + _i), e); \
                _i += 4;                                                \
        }                                                               \
        if (_len & 2) {                                                 \
                unsafe_put_user(*(u16 *)(_src + _i), (u16 __user *)(_dst + _i), e); \
                _i += 2;                                                \
        }                                                               \
        if (_len & 1)                                                   \
                unsafe_put_user(*(u8 *)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)                 \
        __get_user_size_goto(*((type *)(dst)),                          \
                (__force type __user *)(src), sizeof(type), err_label)

#define __put_kernel_nofault(dst, src, type, err_label)                 \
        __put_user_size_goto(*((type *)(src)),                          \
                (__force type __user *)(dst), sizeof(type), err_label)

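/*
 * These back the generic get_kernel_nofault()/copy_from_kernel_nofault()
 * helpers in <linux/uaccess.h>, used to probe kernel addresses that may
 * be unmapped. Usage sketch (hypothetical names):
 *
 *	unsigned long insn;
 *
 *	if (get_kernel_nofault(insn, (unsigned long *)addr))
 *		return -EFAULT;
 */
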
#endif  /* _ARCH_POWERPC_UACCESS_H */