/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>
/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
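
/*
 * Illustrative sketch (not part of the original header): any open-coded
 * user access must be bracketed by the pair above, exactly as the copy
 * helpers at the bottom of this file do.  The function and variable
 * names here are hypothetical.
 *
 *	static int example_read_user_byte(const u8 __user *uptr, u8 *val)
 *	{
 *		unsigned int ua_flags = uaccess_save_and_enable();
 *		int err = 0;
 *
 *		__get_user_asm_byte(*val, (unsigned long)uptr, err, TUSER());
 *		uaccess_restore(ua_flags);
 *		return err;
 *	}
 */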
/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

#ifdef CONFIG_MMU
/*
 * We use 33-bit arithmetic here.  Success returns zero, failure returns
 * addr_limit.  We take advantage that addr_limit will be zero for KERNEL_DS,
 * so this will always return success in that case.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__(".syntax unified\n" \
		"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (TASK_SIZE) \
		: "cc"); \
	flag; })
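
/*
 * Worked example (illustrative, assuming TASK_SIZE = 0xbf000000): for
 * addr = 0xbeffffff and size = 1, "adds" yields 0xbf000000 with carry
 * clear, "sbcscc" borrows (0xbf000000 - 0xbf000000 - 1), so "movcc"
 * clears %0 and the check succeeds.  For addr = 0xfffffffc and
 * size = 8, "adds" carries out of bit 31, both conditional
 * instructions are skipped, and %0 keeps TASK_SIZE (failure).
 */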
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
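
/*
 * For example: on 32-bit ARM, __inttype(u8) and __inttype(u32) are
 * unsigned long, while __inttype(u64) is unsigned long long.  This is
 * what lets __get_user_check() below declare an "r2" register variable
 * wide enough for any requested size.
 */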
/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (TASK_SIZE)
	: "cc");

	csdb();
	return safe_ptr;
}
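
/*
 * Illustrative use (hypothetical caller): sanitise a user pointer once
 * the range check has passed, so that a mispredicted access_ok() cannot
 * speculatively dereference an out-of-range pointer:
 *
 *	if (!access_ok(uptr, len))
 *		return -EFAULT;
 *	uptr = uaccess_mask_range_ptr(uptr, len);
 */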
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);
#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"
#define __get_user_x(__r2, __p, __e, __l, __s)				\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif
/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif
#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})
#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
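
/*
 * Illustrative use (hypothetical ioctl handler):
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))
 *		return -EFAULT;
 *
 * get_user() returns 0 on success and -EFAULT on a faulting access; on
 * failure the destination is zeroed rather than left holding stale
 * kernel data.
 */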
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})
#else /* CONFIG_MMU */

#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)
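
/*
 * Illustrative: access_ok() only validates the address range; e.g. a
 * caller that intends to use the non-verifying __get_user()/__put_user()
 * accessors below would first do:
 *
 *	if (!access_ok(buf, count))
 *		return -EFAULT;
 */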
#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else
/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e. don't return a value as such).
 */
#define __get_user(x, ptr)						\
	({								\
		long __gu_err = 0;					\
		__get_user_err((x), (ptr), __gu_err, TUSER());		\
		__gu_err;						\
	})
#endif
#define __get_user_err(x, ptr, err, __t)				\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)
#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
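
/*
 * How the fixup above works (summary): if the load at label 1 faults,
 * the __ex_table entry (1b -> 3b) sends the fault handler to label 3,
 * which sets err to -EFAULT, zeroes the destination register so no
 * stale kernel data can leak, and branches back to label 2 to resume.
 */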
#define __get_user_asm_byte(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrb" __t)
#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrh" __t)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */
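
/*
 * Worked example (illustrative): reading the halfword 0xbeef on a
 * little-endian pre-v6 CPU gives __b1 = 0xef and __b2 = 0xbe, so
 * __b1 | (__b2 << 8) reassembles 0xbeef; the big-endian variant swaps
 * which byte is shifted.
 */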
#define __get_user_asm_word(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldr" __t)
#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)
#define put_user(x, ptr)						\
	({								\
		int __pu_err = 0;					\
		__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
		__pu_err;						\
	})
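
/*
 * Illustrative use (hypothetical caller):
 *
 *	if (put_user(status, (int __user *)argp))
 *		return -EFAULT;
 *
 * Like get_user(), this returns 0 on success and -EFAULT on a fault.
 */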
#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
	({								\
		long __pu_err = 0;					\
		__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
		__pu_err;						\
	})
#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#endif /* !CONFIG_CPU_SPECTRE */
#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")
#define __put_user_asm_byte(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strb" __t)
#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strh" __t)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */
#define __put_user_asm_word(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "str" __t)
#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif
#define __put_user_asm_dword(x, __pu_addr, err, __t)		\
	__asm__ __volatile__(					\
 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n" )	\
 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"     )	\
 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"     )	\
 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n" )	\
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")
#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (src);					\
	unsigned long __src = (unsigned long)(__pk_ptr);		\
	type __val;							\
	int __err = 0;							\
	switch (sizeof(type)) {						\
	case 1:	__get_user_asm_byte(__val, __src, __err, ""); break;	\
	case 2: __get_user_asm_half(__val, __src, __err, ""); break;	\
	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
	case 8: {							\
		u32 *__v32 = (u32 *)&__val;				\
		__get_user_asm_word(__v32[0], __src, __err, "");	\
		if (__err)						\
			break;						\
		__get_user_asm_word(__v32[1], __src + 4, __err, "");	\
		break;							\
	}								\
	default: __err = __get_user_bad(); break;			\
	}								\
	if (__err)							\
		goto err_label;						\
	*(type *)(dst) = __val;						\
} while (0)
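
/*
 * Illustrative use (hypothetical caller): probe a kernel address that
 * may be unmapped without oopsing:
 *
 *	unsigned long word;
 *
 *	__get_kernel_nofault(&word, kaddr, unsigned long, fault);
 *	return word;
 * fault:
 *	return 0;
 */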
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (dst);					\
	unsigned long __dst = (unsigned long)__pk_ptr;			\
	int __err = 0;							\
	type __val = *(type *)src;					\
	switch (sizeof(type)) {						\
	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
	case 2:	__put_user_asm_half(__val, __dst, __err, ""); break;	\
	case 4:	__put_user_asm_word(__val, __dst, __err, ""); break;	\
	case 8:	__put_user_asm_dword(__val, __dst, __err, ""); break;	\
	default: __err = __put_user_bad(); break;			\
	}								\
	if (__err)							\
		goto err_label;						\
} while (0)

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}
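
/*
 * Note: like all raw_copy_*() helpers, this returns the number of
 * bytes NOT copied, so a typical caller (usually via copy_from_user())
 * treats any non-zero return as -EFAULT:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */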
extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}
extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else /* CONFIG_MMU */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)

#endif /* CONFIG_MMU */
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}
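
/*
 * Illustrative: clear_user() performs its own access_ok() check, and
 * like the copy helpers returns the number of bytes left unwritten
 * (0 on success).
 */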
/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */