// SPDX-License-Identifier: GPL-2.0
/*
 * Standard user space access functions based on mvcp/mvcs and doing
 * interesting things in the secondary space mode.
 *
 * Copyright IBM Corp. 2006,2014
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
11 #include <linux/jump_label.h>
12 #include <linux/uaccess.h>
13 #include <linux/export.h>
14 #include <linux/errno.h>
16 #include <asm/mmu_context.h>
17 #include <asm/facility.h>
#ifdef CONFIG_DEBUG_USER_ASCE
/*
 * Consistency check on the address-space-control elements (ASCEs):
 * read control registers 1 and 7 and compare them against the kernel
 * and user ASCEs recorded in lowcore; panic with a diagnostic dump on
 * any mismatch.
 *
 * NOTE(review): this extract is missing lines (braces and, presumably,
 * an early "return" on the match case) — restore from the full file.
 */
void debug_user_asce(void)
unsigned long cr1, cr7;
__ctl_store(cr1, 1, 1);		/* cr1: primary-space ASCE */
__ctl_store(cr7, 7, 7);		/* cr7: secondary-space ASCE */
if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
panic("incorrect ASCE on kernel exit\n"
      "cr1: %016lx cr7: %016lx\n"
      "kernel: %016llx user: %016llx\n",
      cr1, cr7, S390_lowcore.kernel_asce, S390_lowcore.user_asce);
#endif /* CONFIG_DEBUG_USER_ASCE */
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
/* Static key: set at early boot if the MVCOS instruction is usable. */
static DEFINE_STATIC_KEY_FALSE(have_mvcos);

/*
 * Probe facility bit 27 (move-with-optional-specifications, judging by
 * the key name) and flip the static branch accordingly.
 */
static int __init uaccess_init(void)
if (test_facility(27))
	static_branch_enable(&have_mvcos);
early_initcall(uaccess_init);

/*
 * Runtime dispatch helper: non-zero when the MVCOS-based copy routines
 * should be used. On machines built with z10 features the second,
 * facility-check-free variant below is compiled in instead.
 *
 * NOTE(review): the "#else" arm, "#endif" and the return statements are
 * missing from this extract — restore from the full file.
 */
static inline int copy_with_mvcos(void)
if (static_branch_likely(&have_mvcos))
static inline int copy_with_mvcos(void)
/*
 * Copy from user space using the MVCOS instruction (opcode 0xc8..,
 * emitted via ".insn" so old assemblers still build it).
 *
 * r0 = 0x81 is the operand-access-control word; it presumably selects
 * the secondary (user) address space for the source operand — confirm
 * against the z/Architecture MVCOS description.
 *
 * On a fault the exception table entries redirect to the retry path,
 * which shortens the copy to the next 4K page boundary of the source
 * (labels 2:/3:) so the maximum number of bytes is copied before the
 * residual count is returned.
 *
 * Returns the number of bytes NOT copied (0 on full success).
 *
 * NOTE(review): several asm lines (asm volatile(...), jump/branch and
 * exit labels) are missing from this extract.
 */
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
register unsigned long reg0 asm("0") = 0x81UL;
unsigned long tmp1, tmp2;
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
"   nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
"   clgr %0,%4\n"	/* copy crosses next page boundary? */
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
/*
 * Fallback copy from user space using MVCP (move-to-primary), which
 * moves at most 256 bytes per execution from the secondary (user)
 * space into the primary (kernel) space.
 *
 * The fault-retry path (label 3:) shortens the copy to the next 4K
 * page boundary of the source so as many bytes as possible are copied
 * before the residual count is returned.
 *
 * Returns the number of bytes NOT copied (0 on full success).
 *
 * NOTE(review): the asm volatile(...) opener, loop control and exit
 * labels are missing from this extract.
 */
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long tmp1, tmp2;
"0: mvcp 0(%0,%2),0(%1),%3\n"
"2: mvcp 0(%0,%2),0(%1),%3\n"
"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
"   nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
"   clgr %0,%4\n"	/* copy crosses next page boundary? */
"4: mvcp 0(%4,%2),0(%1),%3\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
123 unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
125 if (copy_with_mvcos())
126 return copy_from_user_mvcos(to, from, n);
127 return copy_from_user_mvcp(to, from, n);
129 EXPORT_SYMBOL(raw_copy_from_user);
/*
 * Copy to user space using MVCOS. r0 = 0x810000 is the operand-access
 * control word; it presumably selects the secondary (user) address
 * space for the destination operand — confirm against the
 * z/Architecture MVCOS description.
 *
 * Fault handling mirrors copy_from_user_mvcos(): on a fault the copy
 * is retried up to the next 4K page boundary of the destination
 * (labels 2:/3:) and the residual byte count is returned.
 *
 * Returns the number of bytes NOT copied (0 on full success).
 *
 * NOTE(review): several asm lines are missing from this extract.
 */
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
"   nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
"   clgr %0,%4\n"	/* copy crosses next page boundary? */
"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
/*
 * Fallback copy to user space using MVCS (move-to-secondary), which
 * moves at most 256 bytes per execution from the primary (kernel)
 * space into the secondary (user) space.
 *
 * The fault-retry path (label 3:) shortens the copy to the next 4K
 * page boundary of the destination so as many bytes as possible are
 * stored before the residual count is returned.
 *
 * Returns the number of bytes NOT copied (0 on full success).
 *
 * NOTE(review): the asm volatile(...) opener, loop control and exit
 * labels are missing from this extract.
 */
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long tmp1, tmp2;
"0: mvcs 0(%0,%1),0(%2),%3\n"
"2: mvcs 0(%0,%1),0(%2),%3\n"
"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
"   nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
"   clgr %0,%4\n"	/* copy crosses next page boundary? */
"4: mvcs 0(%4,%1),0(%2),%3\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
195 unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
197 if (copy_with_mvcos())
198 return copy_to_user_mvcos(to, from, n);
199 return copy_to_user_mvcs(to, from, n);
201 EXPORT_SYMBOL(raw_copy_to_user);
/*
 * Copy user space to user space using MVCOS. r0 = 0x810081 presumably
 * selects the secondary (user) address space for BOTH operands —
 * confirm against the z/Architecture MVCOS description.
 *
 * Unlike the from/to variants there is no page-boundary retry here
 * (see the FIXME): a fault simply ends the copy and the residual byte
 * count is returned.
 */
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
register unsigned long reg0 asm("0") = 0x810081UL;
unsigned long tmp1, tmp2;
/* FIXME: copy with reduced length. */
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
/*
 * Fallback user-to-user copy using plain MVC executed while the CPU
 * addresses user memory; bulk 256-byte MVCs (label 2:) plus an
 * EX-executed 1-byte MVC template (labels 1:/4:) for the remainder.
 *
 * Returns the number of bytes NOT copied (0 on full success).
 *
 * NOTE(review): the asm opener, loop control and the address-space
 * switch surrounding this sequence are missing from this extract.
 */
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
"1: mvc 0(1,%1),0(%2)\n"
"2: mvc 0(256,%1),0(%2)\n"
"4: ex %0,1b-0b(%3)\n"	/* execute the 1-byte mvc with length from %0 */
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
257 unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
259 if (copy_with_mvcos())
260 return copy_in_user_mvcos(to, from, n);
261 return copy_in_user_mvc(to, from, n);
263 EXPORT_SYMBOL(raw_copy_in_user);
/*
 * Zero user memory using MVCOS with empty_zero_page as the source
 * operand (%4). r0 = 0x810000 matches copy_to_user_mvcos(): the
 * destination is presumably accessed in the secondary (user) space —
 * confirm against the z/Architecture MVCOS description.
 *
 * On a fault the clear is retried up to the next 4K page boundary of
 * the destination (labels 2:/3:) and the residual byte count is
 * returned.
 *
 * NOTE(review): several asm lines are missing from this extract. The
 * inline comments below said "%4" in the original; the register
 * actually used on these lines is %3 (tmp2) — comments corrected.
 */
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
register unsigned long reg0 asm("0") = 0x810000UL;
unsigned long tmp1, tmp2;
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
"2: la %3,4095(%1)\n"	/* %3 = to + 4095 */
"   nr %3,%2\n"		/* %3 = (to + 4095) & -4096 */
"   clgr %0,%3\n"	/* copy crosses next page boundary? */
"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
/*
 * Fallback user-memory clear using XC (exclusive-or a region with
 * itself zeroes it): bulk 256-byte XCs (label 2:) plus an EX-executed
 * 1-byte XC template for the tail, with the first chunk shortened to
 * the next 4K page boundary of the destination.
 *
 * Returns the number of bytes NOT cleared (0 on full success).
 *
 * NOTE(review): the asm opener, the srl that pairs with the sll below,
 * loop control and exit labels are missing from this extract.
 */
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
unsigned long tmp1, tmp2;
"   xc 0(1,%1),0(%1)\n"	/* 1-byte template for EX */
"   la %2,255(%1)\n"	/* %2 = ptr + 255 */
"   sll %2,12\n"	/* %2 = (ptr + 255) & -4096 (with the missing srl) */
"   clgr %0,%2\n"	/* clear crosses next page boundary? */
"2: xc 0(256,%1),0(%1)\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
328 unsigned long __clear_user(void __user *to, unsigned long size)
330 if (copy_with_mvcos())
331 return clear_user_mvcos(to, size);
332 return clear_user_xc(to, size);
334 EXPORT_SYMBOL(__clear_user);
/*
 * Bounded user-string length using SRST (search-string): r0 = 0 is the
 * character to search for (the terminating NUL). The result includes
 * the terminator, hence the final "la %0,1(%3)".
 *
 * NOTE(review): the asm opener, the srst instruction itself and the
 * fault-handling labels are missing from this extract.
 */
static inline unsigned long strnlen_user_srst(const char __user *src,
register unsigned long reg0 asm("0") = 0;	/* search character: NUL */
unsigned long tmp1, tmp2;
"   la %0,1(%3)\n"	/* strnlen_user result includes the \0 */
: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
/*
 * __strnlen_user - length (including NUL) of a user string, bounded
 * by @size. Thin wrapper around the SRST-based helper.
 *
 * NOTE(review): braces and what looks like a size==0 guard are missing
 * from this extract — restore from the full file.
 */
unsigned long __strnlen_user(const char __user *src, unsigned long size)
return strnlen_user_srst(src, size);
EXPORT_SYMBOL(__strnlen_user);
/*
 * __strncpy_from_user - copy a NUL-terminated string from user space.
 * @dst:  kernel destination buffer
 * @src:  untrusted user-space source string
 * @size: maximum number of bytes to copy
 *
 * Copies in chunks bounded by L1_CACHE_BYTES, with the first chunk
 * shortened so subsequent chunks are cache-line aligned on the source
 * side; after each chunk strnlen() on the copied bytes detects an
 * embedded NUL, which ends the loop.
 *
 * Non-positive @size is rejected up front (the returned value for that
 * case is on a line missing from this extract, as are the loop opener,
 * fault-exit and the done/offset bookkeeping).
 */
long __strncpy_from_user(char *dst, const char __user *src, long size)
size_t done, len, offset, len_str;
if (unlikely(size <= 0))
offset = (size_t)src & (L1_CACHE_BYTES - 1);	/* distance to next cache line */
len = min(size - done, L1_CACHE_BYTES - offset);
if (copy_from_user(dst, src, len))
len_str = strnlen(dst, len);	/* did this chunk contain the NUL? */
} while ((len_str == len) && (done < size));
EXPORT_SYMBOL(__strncpy_from_user);