/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	asm (ALTERNATIVE("",
			 "and " __percpu_arg([mask]) ", %[addr]", X86_FEATURE_LAM)
	     : [addr] "+r" (addr)
	     : [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));

	return addr;
}

#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})
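
/*
 * Worked example (added commentary, values hypothetical): with LAM_U57
 * the untag mask keeps bit 63 and bits 56:0, so the tag lives in bits
 * 62:57:
 *
 *	unsigned long tagged = 0x54007fffdeadbeefUL;	// tag 0x2a in bits 62:57
 *	unsigned long clean  = untagged_addr(tagged);	// 0x00007fffdeadbeefUL
 *
 * Without X86_FEATURE_LAM the ALTERNATIVE keeps the empty default and
 * the address passes through unchanged.
 */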

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	mmap_assert_locked(mm);
	return addr & (mm)->context.untag_mask;
}

#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#endif
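
/*
 * Added note: untagged_addr() uses the current CPU's mask and is only
 * meaningful for the current task, while untagged_addr_remote() reads
 * the target mm's mask and asserts its mmap lock is held.  A sketch of
 * a hypothetical remote-access path:
 *
 *	mmap_read_lock(mm);
 *	addr = untagged_addr_remote(mm, addr);
 *	// ... walk/access the remote mm ...
 *	mmap_read_unlock(mm);
 */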

/*
 * The virtual address space is logically divided into a kernel half
 * and a user half.  When cast to a signed type, user pointers are
 * positive and kernel pointers are negative.
 */
#define valid_user_address(x) ((__force long)(x) >= 0)
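
/*
 * Worked example (added commentary): with 4-level paging the top user
 * address 0x00007fffffffffff is positive when cast to a signed long,
 * while the lowest kernel address 0xffff800000000000 is negative, so
 * the check reduces to a single sign test.
 */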

/*
 * User pointers can have tag bits on x86-64.  This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 * 1. 'ptr' must be in the user half of the address space
 * 2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that addresses around the sign change are not valid addresses,
 * and will GP-fault even with LAM enabled if the sign bit is set (see
 * the "CR3.LAM_SUP" bit that can narrow the canonicality check if we
 * ever enable it, but not remove it entirely).
 *
 * So the "overflow into kernel addresses" does not imply some sudden
 * exact boundary at the sign bit, and we can allow a lot of slop on the
 * size check.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr', and even if the end might be in kernel space, we'll
 * hit the GP faults for non-canonical accesses before we ever get
 * there.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return valid_user_address(ptr);
	} else {
		unsigned long sum = size + (__force unsigned long)ptr;

		return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
	}
}
#define __access_ok __access_ok
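
/*
 * Worked examples (added commentary):
 * - a compile-time-constant size <= PAGE_SIZE only tests the sign of
 *   'ptr'; the non-canonical hole above the user half catches any tail
 *   that crosses over, as described above.
 * - for a variable size, ptr = 0x1000 with size = -1ul wraps to
 *   sum = 0xfff, so 'sum >= ptr' fails and the access is rejected.
 */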

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If CPU has FSRM feature, use 'rep movs'.
	 * Otherwise, use rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();
	return len;
}
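
/*
 * Added note: "+c", "+D" and "+S" pin len/to/from to RCX/RDI/RSI, the
 * registers 'rep movsb' consumes; rep_movs_alternative uses the same
 * ad-hoc convention, and either path leaves the uncopied byte count in
 * RCX, which is what gets returned in 'len'.
 */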

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
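
/*
 * Usage sketch (added commentary, simplified from the generic
 * copy_from_user()/copy_to_user() wrappers, which also do access_ok()
 * and hardening checks): a non-zero return means a partial copy, e.g.:
 *
 *	if (raw_copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */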

extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;

	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size);
	clac();
	return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}

/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
		: "a" (0));

	clac();
	return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (__access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
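
/*
 * Usage sketch (added commentary): like the copy helpers, clear_user()
 * returns the number of bytes that could not be zeroed:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */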
#endif /* _ASM_X86_UACCESS_64_H */