 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 * Mnemonic names for arguments to memcpy/__copy_user
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * - src and dst don't overlap
 * memcpy uses the standard calling convention
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 * - src is readable (no exceptions when reading src)
 * - dst is writable (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * arch/mips/include/asm/uaccess.h
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
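 *
 * As a hedged illustration (not the kernel's actual C code), a
 * copy_from_user()-style caller built on top of __copy_user can be pictured
 * roughly as:
 *
 *	size_t left = __copy_user(dst, src, len);    // 0 on success
 *	if (left)                                    // faulted part-way through
 *		memset(dst + (len - left), 0, left); // don't leak stale kernel data
 *	return left;                                 // upper bound of uncopied bytes
 *
 * The memset() line only illustrates the "no information leak" requirement
 * described above, not where the kernel actually performs it.
 *
 * The EXC() macro below emits each potentially faulting access together with
 * an __ex_table entry pairing the instruction's address with its fixup
 * label, which is how the handlers named above get control on a fault.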
#define EXC(inst_reg,addr,handler)		\
	.section __ex_table,"a";		\
 * Only on the 64-bit kernel can we make use of 64-bit registers.
 * Since we share the code base with the mips32 tree (which uses the o32 ABI
 * register definitions), we need to redefine the register definitions from
 * the n64 ABI register naming to the o32 ABI register naming.
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define STFIRST STORER
#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define STFIRST STOREL
#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit) (FIRST(unit)+NBYTES-1)
#define UNIT(unit) FIRST(unit)

#define ADDRMASK (NBYTES-1)
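/*
 * Illustrative use of the pairs above (a sketch, assuming a 64-bit kernel
 * where NBYTES == 8): an unaligned doubleword at src is assembled with
 *
 *	LDFIRST	t0, FIRST(0)(src)	# load one part of the unaligned word
 *	LDREST	t0, REST(0)(src)	# merge in the rest; t0 now holds NBYTES
 *
 * i.e. a left/right load pair covering src .. src+NBYTES-1, with the actual
 * instructions selected per endianness by the defines above.
 */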
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
LEAF(memcpy)	/* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy)
	move v0, dst	/* return value */
FEXPORT(__raw_copy_from_user)
EXPORT_SYMBOL(__raw_copy_from_user)
FEXPORT(__raw_copy_to_user)
EXPORT_SYMBOL(__raw_copy_to_user)
FEXPORT(__raw_copy_in_user)
EXPORT_SYMBOL(__raw_copy_in_user)
 * Note: dst & src may be unaligned, len may be 0
	# Octeon doesn't care if the destination is unaligned. The hardware
	# can fix it faster than we can special case the assembly.
	sltu t0, len, NBYTES	# Check if < 1 word
	bnez t0, copy_bytes_checklen
	and t0, src, ADDRMASK	# Check if src unaligned
	bnez t0, src_unaligned
	sltu t0, len, 4*NBYTES	# Check if < 4 words
	bnez t0, less_than_4units
	sltu t0, len, 8*NBYTES	# Check if < 8 words
	bnez t0, less_than_8units
	sltu t0, len, 16*NBYTES	# Check if < 16 words
	bnez t0, cleanup_both_aligned
	sltu t0, len, 128+1	# Check if len < 129
	bnez t0, 1f		# Skip prefetch if len is too short
	sltu t0, len, 256+1	# Check if len < 257
	bnez t0, 1f		# Skip prefetch if len is too short
	pref 0, 128(src)	# We must not prefetch invalid addresses
	# This is where we loop if there are more than 128 bytes left
2:	pref 0, 256(src)	# We must not prefetch invalid addresses

	# This is where we loop if we can't prefetch anymore
EXC( LOAD t0, UNIT(0)(src), l_exc)
EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
	SUB len, len, 16*NBYTES
EXC( STORE t0, UNIT(0)(dst), s_exc_p16u)
EXC( STORE t1, UNIT(1)(dst), s_exc_p15u)
EXC( STORE t2, UNIT(2)(dst), s_exc_p14u)
EXC( STORE t3, UNIT(3)(dst), s_exc_p13u)
EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
EXC( STORE t0, UNIT(4)(dst), s_exc_p12u)
EXC( STORE t1, UNIT(5)(dst), s_exc_p11u)
EXC( STORE t2, UNIT(6)(dst), s_exc_p10u)
	ADD src, src, 16*NBYTES
EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
	ADD dst, dst, 16*NBYTES
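	# src and dst were already advanced by 16*NBYTES above, so the second
	# half of this iteration addresses the block with negative UNIT()
	# offsets; faults here are unwound via l_exc_copy_rewind16.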
EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16)
EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16)
EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16)
EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16)
EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16)
EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16)
EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16)
EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16)
EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u)
	sltu t0, len, 256+1	# See if we can prefetch more
	sltu t0, len, 128	# See if we can loop one more time

	# Jump here if there are less than 16*NBYTES left.
cleanup_both_aligned:
	sltu t0, len, 8*NBYTES
	bnez t0, less_than_8units
EXC( LOAD t0, UNIT(0)(src), l_exc)
EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
	SUB len, len, 8*NBYTES
EXC( STORE t0, UNIT(0)(dst), s_exc_p8u)
EXC( STORE t1, UNIT(1)(dst), s_exc_p7u)
EXC( STORE t2, UNIT(2)(dst), s_exc_p6u)
EXC( STORE t3, UNIT(3)(dst), s_exc_p5u)
EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
EXC( STORE t0, UNIT(4)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(5)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(6)(dst), s_exc_p2u)
EXC( STORE t3, UNIT(7)(dst), s_exc_p1u)
	ADD src, src, 8*NBYTES
	ADD dst, dst, 8*NBYTES

	# Jump here if there are less than 8*NBYTES left.
	sltu t0, len, 4*NBYTES
	bnez t0, less_than_4units
EXC( LOAD t0, UNIT(0)(src), l_exc)
EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
	SUB len, len, 4*NBYTES
EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
	ADD src, src, 4*NBYTES
	ADD dst, dst, 4*NBYTES

	# Jump here if there are less than 4*NBYTES left. This means
	# we may need to copy up to 3 NBYTES words.
	sltu t0, len, 1*NBYTES
	bnez t0, copy_bytes_checklen

	# 1) Copy NBYTES, then check length again
EXC( LOAD t0, 0(src), l_exc)
EXC( STORE t0, 0(dst), s_exc_p1u)
	bnez t1, copy_bytes_checklen

	# 2) Copy NBYTES, then check length again
EXC( LOAD t0, 0(src), l_exc)
EXC( STORE t0, 0(dst), s_exc_p1u)
	bnez t1, copy_bytes_checklen

	# 3) Copy NBYTES, then check length again
EXC( LOAD t0, 0(src), l_exc)
	b copy_bytes_checklen
EXC( STORE t0, -8(dst), s_exc_p1u)
	SRL t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	beqz t0, cleanup_src_unaligned
	and rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
EXC( LDFIRST t0, FIRST(0)(src), l_exc)
EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
	SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), l_exc_copy)
EXC( LDREST t1, REST(1)(src), l_exc_copy)
EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
EXC( LDREST t2, REST(2)(src), l_exc_copy)
EXC( LDREST t3, REST(3)(src), l_exc_copy)
	ADD src, src, 4*NBYTES
EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
	ADD dst, dst, 4*NBYTES

cleanup_src_unaligned:
	and rem, len, NBYTES-1	# rem = len % NBYTES
	beq rem, len, copy_bytes
EXC( LDFIRST t0, FIRST(0)(src), l_exc)
EXC( LDREST t0, REST(0)(src), l_exc_copy)
EXC( STORE t0, 0(dst), s_exc_p1u)
	/* 0 < len < NBYTES */
#define COPY_BYTE(N)			\
EXC( lb t0, N(src), l_exc);		\
EXC( sb t0, N(dst), s_exc_p1)
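	/*
	 * COPY_BYTE(N): copy the single byte at offset N, with fault handling
	 * on both the load and the store.
	 */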
EXC( lb t0, NBYTES-2(src), l_exc)
EXC( sb t0, NBYTES-2(dst), s_exc_p1)
	/* Rewind src and dst by 16*NBYTES for l_exc_copy */
	SUB src, src, 16*NBYTES
	SUB dst, dst, 16*NBYTES

 * Copy bytes from src until the faulting load address (or until a
 * lb faults).
 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
 * may be more than a byte beyond the last address.
 * Hence, the lb below may get an exception.
 * Assumes src < THREAD_BUADDR($28)
	LOAD t0, TI_TASK($28)
	LOAD t0, THREAD_BUADDR(t0)
EXC( lb t1, 0(src), l_exc)
	sb t1, 0(dst)	# can't fault -- we're copy_from_user
	LOAD t0, TI_TASK($28)
	LOAD t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	SUB len, AT, t0		# len = number of uncopied bytes
	ADD len, len, n*NBYTES
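	# The s_exc_pNu store-fault handlers add the N units that were not yet
	# stored back into len, so __copy_user returns an upper bound of the
	# uncopied bytes.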
EXPORT_SYMBOL(memmove)
	sltu t0, a1, t0		# dst + len <= src -> memcpy
	sltu t1, a0, t1		# dst >= src + len -> memcpy
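	# Illustrative C sketch of this test: if (dst + len <= src ||
	# src + len <= dst) the regions don't overlap and plain memcpy is
	# safe; otherwise fall through to __rmemcpy, which copies in a
	# direction that is safe for overlapping buffers.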
	move v0, a0		/* return value */

	/* fall through to __rmemcpy */
LEAF(__rmemcpy)		/* a0=dst a1=src a2=len */
	beqz t0, r_end_bytes_up	# src >= dst
	ADD a0, a2		# dst = dst + len
	ADD a1, a2		# src = src + len
	bnez a2, r_end_bytes_up