1 // SPDX-License-Identifier: GPL-2.0
/*
 * Standard user space access functions based on mvcp/mvcs and doing
 * interesting things in the secondary space mode.
 *
 * Copyright IBM Corp. 2006,2014
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
11 #include <linux/jump_label.h>
12 #include <linux/uaccess.h>
13 #include <linux/export.h>
14 #include <linux/errno.h>
16 #include <asm/mmu_context.h>
17 #include <asm/facility.h>
19 #ifdef CONFIG_DEBUG_ENTRY
/*
 * debug_user_asce - sanity-check the ASCEs in cr1/cr7 on kernel entry/exit.
 * @exit: non-zero when invoked on kernel exit, zero on entry; only used to
 *        label the panic message.
 *
 * Reads control registers 1 and 7 and panics unless cr1 holds the kernel
 * ASCE and cr7 the user ASCE from lowcore.
 *
 * NOTE(review): this excerpt is missing lines (original 21, 27, 33-34) —
 * presumably the opening brace, an early "return;" after the successful
 * comparison, and the closing brace. Confirm against the full file.
 */
20 void debug_user_asce(int exit)
22 unsigned long cr1, cr7;
/* Snapshot the current contents of control registers 1 and 7. */
24 __ctl_store(cr1, 1, 1);
25 __ctl_store(cr7, 7, 7);
/* Expected state: cr1 == kernel ASCE, cr7 == user ASCE. */
26 if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
28 panic("incorrect ASCE on kernel %s\n"
29 "cr1: %016lx cr7: %016lx\n"
30 "kernel: %016llx user: %016llx\n",
31 exit ? "exit" : "entry", cr1, cr7,
32 S390_lowcore.kernel_asce, S390_lowcore.user_asce);
35 #endif /* CONFIG_DEBUG_ENTRY */
37 #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
/*
 * Pre-z10 builds cannot assume the MVCOS facility exists; its presence is
 * probed once at boot and cached in a static key.
 */
38 static DEFINE_STATIC_KEY_FALSE(have_mvcos);
/*
 * uaccess_init - early-boot probe for facility 27 (move-with-optional-
 * specifications); enables the have_mvcos static key when installed.
 * NOTE(review): excerpt is missing the braces and "return 0;" (original
 * lines 41, 44-45).
 */
40 static int __init uaccess_init(void)
42 if (test_facility(27))
43 static_branch_enable(&have_mvcos);
46 early_initcall(uaccess_init);
/*
 * copy_with_mvcos - static-key fast-path test used by the copy/clear
 * dispatchers below. NOTE(review): body lines after the branch test are
 * not visible in this excerpt.
 */
48 static inline int copy_with_mvcos(void)
50 if (static_branch_likely(&have_mvcos))
/*
 * Second variant: with CONFIG_HAVE_MARCH_Z10_FEATURES the facility is
 * architecturally guaranteed, so presumably this version returns a
 * constant — the #else separating the two variants and this body are
 * missing from the excerpt; confirm against the full file.
 */
55 static inline int copy_with_mvcos(void)
/*
 * copy_from_user_mvcos - copy from user space via the MVCOS instruction,
 * emitted as ".insn ss,0xc80000000000,...". reg0 (hard register 0) = 0x81
 * is the MVCOS operand-access control — presumably selecting the secondary
 * (user) address space for the source operand; confirm against the
 * z/Architecture Principles of Operation.
 *
 * NOTE(review): this excerpt is incomplete — asm labels 1, 4-7 referenced
 * by the EX_TABLE entries, the function braces and the return statement
 * are among the dropped lines. Comments cover only what is visible.
 */
61 static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
64 register unsigned long reg0 asm("0") = 0x81UL;
65 unsigned long tmp1, tmp2;
/* Label 0: first MVCOS attempt covering the whole requested range. */
69 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
/* Fault path: retry up to the next 4K page boundary only. */
75 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
76 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
78 " clgr %0,%4\n" /* copy crosses next page boundary? */
80 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
/* Fixups route faulting instructions to the retry/exit labels. */
85 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
86 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
87 : "d" (reg0) : "cc", "memory");
/*
 * copy_from_user_mvcp - fallback copy from user space using MVCP
 * (move-to-primary), which moves at most 256 bytes per execution — hence
 * the chunked loop structure visible below.
 *
 * NOTE(review): excerpt is incomplete — labels 1, 5-9 referenced by the
 * EX_TABLE entries, the braces and the return are among the dropped lines.
 */
91 static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
94 unsigned long tmp1, tmp2;
/* Labels 0/2: MVCP attempts; label 3 retries up to the page boundary. */
99 "0: mvcp 0(%0,%2),0(%1),%3\n"
104 "2: mvcp 0(%0,%2),0(%1),%3\n"
107 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
109 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
111 " clgr %0,%4\n" /* copy crosses next page boundary? */
113 "4: mvcp 0(%4,%2),0(%1),%3\n"
118 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
119 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
120 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
125 unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
127 if (copy_with_mvcos())
128 return copy_from_user_mvcos(to, from, n);
129 return copy_from_user_mvcp(to, from, n);
131 EXPORT_SYMBOL(raw_copy_from_user);
/*
 * copy_to_user_mvcos - copy to user space via MVCOS. reg0 = 0x810000 is
 * the operand-access control — presumably selecting the secondary (user)
 * address space for the destination operand this time; confirm against
 * the z/Architecture Principles of Operation.
 *
 * NOTE(review): excerpt is incomplete — labels 1, 4-7 referenced by the
 * EX_TABLE entries, the braces and the return are among the dropped lines.
 */
133 static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
136 register unsigned long reg0 asm("0") = 0x810000UL;
137 unsigned long tmp1, tmp2;
/* Label 0: full-range attempt; label 2 retries up to the page boundary. */
141 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
147 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
148 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
150 " clgr %0,%4\n" /* copy crosses next page boundary? */
152 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
157 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
158 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
159 : "d" (reg0) : "cc", "memory");
/*
 * copy_to_user_mvcs - fallback copy to user space using MVCS
 * (move-to-secondary), in chunks of at most 256 bytes per execution.
 *
 * NOTE(review): excerpt is incomplete — labels 1, 5-9 referenced by the
 * EX_TABLE entries, the braces and the return are among the dropped lines.
 */
163 static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
166 unsigned long tmp1, tmp2;
/* Labels 0/2: MVCS attempts; label 3 retries up to the page boundary. */
171 "0: mvcs 0(%0,%1),0(%2),%3\n"
176 "2: mvcs 0(%0,%1),0(%2),%3\n"
179 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
181 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
183 " clgr %0,%4\n" /* copy crosses next page boundary? */
185 "4: mvcs 0(%4,%1),0(%2),%3\n"
190 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
191 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
192 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
197 unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
199 if (copy_with_mvcos())
200 return copy_to_user_mvcos(to, from, n);
201 return copy_to_user_mvcs(to, from, n);
203 EXPORT_SYMBOL(raw_copy_to_user);
/*
 * copy_in_user_mvcos - user-to-user copy via MVCOS. reg0 = 0x810081 is
 * the operand-access control — presumably selecting the secondary (user)
 * address space for BOTH operands; confirm against the z/Architecture
 * Principles of Operation.
 *
 * NOTE(review): excerpt is incomplete — most of the asm body (original
 * lines 215-222), the braces and the return are among the dropped lines.
 */
205 static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
208 register unsigned long reg0 asm("0") = 0x810081UL;
209 unsigned long tmp1, tmp2;
/* Unlike the other MVCOS routines, no page-bounded retry is attempted. */
212 /* FIXME: copy with reduced length. */
214 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
223 : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
224 : "d" (reg0) : "cc", "memory");
/*
 * copy_in_user_mvc - fallback user-to-user copy using MVC under the
 * user address space mode, with EX ("4: ex %0,1b-0b(%3)") executing the
 * 1-byte MVC template at label 1 with the residual length.
 *
 * NOTE(review): excerpt is incomplete — labels 0, 3, 5, 6, the address
 * space mode switching, braces and return are among the dropped lines.
 */
228 static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
/* Label 1: 1-byte MVC template, used as the EX target for the remainder. */
239 "1: mvc 0(1,%1),0(%2)\n"
/* Label 2: bulk loop moving 256 bytes per iteration. */
245 "2: mvc 0(256,%1),0(%2)\n"
250 "4: ex %0,1b-0b(%3)\n"
253 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
254 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
259 unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
261 if (copy_with_mvcos())
262 return copy_in_user_mvcos(to, from, n);
263 return copy_in_user_mvc(to, from, n);
265 EXPORT_SYMBOL(raw_copy_in_user);
/*
 * clear_user_mvcos - zero a user space range via MVCOS, using
 * empty_zero_page (%4 input operand) as the all-zero source.
 * reg0 = 0x810000 is the same operand-access control as the
 * copy_to_user_mvcos variant above.
 *
 * NOTE(review): excerpt is incomplete — labels 1, 4, 5, the braces and
 * the return are among the dropped lines.
 */
267 static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
269 register unsigned long reg0 asm("0") = 0x810000UL;
270 unsigned long tmp1, tmp2;
/* Label 0: full-range attempt; label 2 retries up to the page boundary. */
274 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
279 "2: la %3,4095(%1)\n"/* %3 = to + 4095 */
280 " nr %3,%2\n" /* %3 = (to + 4095) & -4096 */
282 " clgr %0,%3\n" /* copy crosses next page boundary? */
284 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
289 EX_TABLE(0b,2b) EX_TABLE(3b,5b)
290 : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
291 : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
/*
 * clear_user_xc - fallback: zero a user space range with XC
 * (exclusive-or a storage area with itself), in chunks of at most
 * 256 bytes per XC.
 *
 * NOTE(review): excerpt is incomplete — the address space mode handling,
 * labels 0/1/4/6, the braces and return are among the dropped lines; in
 * particular the "srl" that must precede "sll %2,12" to round down to a
 * 4K boundary is not visible. Confirm against the full file.
 */
295 static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
297 unsigned long tmp1, tmp2;
/* 1-byte XC template — presumably the EX target for the remainder. */
304 " xc 0(1,%1),0(%1)\n"
306 " la %2,255(%1)\n" /* %2 = ptr + 255 */
308 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
310 " clgr %0,%2\n" /* clear crosses next page boundary? */
/* Label 2: bulk loop clearing 256 bytes per iteration. */
317 "2: xc 0(256,%1),0(%1)\n"
324 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
325 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
330 unsigned long __clear_user(void __user *to, unsigned long size)
332 if (copy_with_mvcos())
333 return clear_user_mvcos(to, size);
334 return clear_user_xc(to, size);
336 EXPORT_SYMBOL(__clear_user);
/*
 * strnlen_user_srst - bounded string-length scan of a user buffer,
 * presumably via the SRST (search string) instruction with reg0 = 0 as
 * the search character (the terminating NUL) — the SRST line itself is
 * among the dropped lines of this excerpt, as are the braces, labels and
 * the return.
 */
338 static inline unsigned long strnlen_user_srst(const char __user *src,
/* Register 0 holds the character SRST searches for: '\0'. */
341 register unsigned long reg0 asm("0") = 0;
342 unsigned long tmp1, tmp2;
351 " la %0,1(%3)\n" /* strnlen_user result includes the '\0' */
355 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
356 : "d" (reg0) : "cc", "memory");
/*
 * __strnlen_user - bounded strlen of a user space string; delegates to
 * the SRST based scanner above.
 *
 * NOTE(review): original lines 361-363 are missing from this excerpt —
 * likely the opening brace plus an early-out (e.g. for size == 0);
 * confirm against the full file before relying on edge-case behavior.
 */
360 unsigned long __strnlen_user(const char __user *src, unsigned long size)
364 return strnlen_user_srst(src, size);
366 EXPORT_SYMBOL(__strnlen_user);
/*
 * __strncpy_from_user - copy a NUL-terminated string from user space in
 * L1-cache-line sized chunks: each iteration copies up to the next cache
 * line boundary, then checks with strnlen() whether a NUL was included;
 * the loop continues while the chunk was full and @size not yet reached.
 *
 * NOTE(review): excerpt is incomplete — the initialization of "done",
 * the advancing of dst/src/done, all return statements and the braces
 * (original lines 369, 371, 373-375, 379, 381-383, 385-386) are missing.
 * Comments cover only the visible skeleton.
 */
368 long __strncpy_from_user(char *dst, const char __user *src, long size)
370 size_t done, len, offset, len_str;
/* Reject non-positive sizes up front. */
372 if (unlikely(size <= 0))
/* Misalignment of src within its cache line bounds the first chunk. */
376 offset = (size_t)src & (L1_CACHE_BYTES - 1);
377 len = min(size - done, L1_CACHE_BYTES - offset);
378 if (copy_from_user(dst, src, len))
/* A NUL inside the chunk makes len_str < len and ends the loop. */
380 len_str = strnlen(dst, len);
384 } while ((len_str == len) && (done < size));
387 EXPORT_SYMBOL(__strncpy_from_user);