/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>
/*
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   (1) memcpy_fromuser
 *	 - src is readable  (no exceptions when reading src)
 *   (2) memcpy_touser
 *	 - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * arch/mips/include/asm/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */
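/*
 * A rough C-level sketch of the __copy_user contract described above
 * (illustrative only; the real entry points and their prototypes live in
 * arch/mips/include/asm/uaccess.h, and read_byte_may_fault() /
 * write_byte_may_fault() are hypothetical stand-ins for the
 * EXC()-protected loads and stores below):
 *
 *	// Returns 0 on success, else an upper bound on the number of
 *	// bytes NOT copied.  On a faulting load the destination tail is
 *	// zero-filled so no stale kernel data leaks to user mode.
 *	size_t copy_user_sketch(char *dst, const char *src, size_t len)
 *	{
 *		size_t done;
 *
 *		for (done = 0; done < len; done++) {
 *			int c = read_byte_may_fault(src + done);
 *			if (c < 0) {			// faulting load
 *				memset(dst + done, 0, len - done);
 *				return len - done;	// uncopied bytes
 *			}
 *			if (write_byte_may_fault(dst + done, (char)c))
 *				return len - done;	// faulting store
 *		}
 *		return 0;
 *	}
 */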
/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */
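/*
 * Worked example of the fixup arithmetic (values made up for
 * illustration): if __copy_user is entered with src_entry == 0x1000 and
 * len == 0x40, then AT == 0x1040, one byte past the end of the source.
 * If a load later faults and the bad-address register reports 0x1020,
 * the handler computes len = AT - 0x1020 = 0x20 uncopied bytes, and
 * invariant (3) lets it locate the matching dst byte to start zeroing.
 */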
#define EXC(inst_reg,addr,handler)		\
9:	inst_reg, addr;				\
	.section __ex_table,"a";		\
	PTR	9b, handler;			\
	.previous
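/*
 * For instance, EXC(LOAD t0, UNIT(0)(src), l_exc) emits the load behind a
 * local label and records a (faulting instruction, fixup) pair in
 * __ex_table, so a fault at that load vectors to l_exc instead of oopsing.
 */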
/*
 * Only on the 64-bit kernel can we make use of 64-bit registers.
 */
/*
 * As we share the code base with the mips32 tree (which uses the o32 ABI
 * register definitions), we need to redefine the register names from the
 * n64 ABI naming to the o32 ABI naming.
 */
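/*
 * A sketch of what those redefinitions look like (partial and
 * illustrative; the full list in this file covers t0 through t7):
 *
 *	#define t0	$8
 *	#define t1	$9
 *	#define t2	$10
 *	#define t3	$11
 */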
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)
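/*
 * Worked example, assuming the 64-bit kernel where NBYTES == 8:
 * FIRST(1) == 8 and REST(1) == 15, i.e. the lowest and highest byte
 * addresses of unit 1, which is exactly what an LDFIRST/LDREST
 * (ldl/ldr-style) pair needs to assemble one unaligned doubleword;
 * ADDRMASK == 7 extracts the misalignment of a pointer.
 */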
/*
 * t7 is used as a flag to note inatomic mode.
 */
LEAF(__copy_user_inatomic)
EXPORT_SYMBOL(__copy_user_inatomic)
	b	__copy_user_common
	 li	t7, 1
END(__copy_user_inatomic)
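/*
 * In inatomic mode (t7 != 0) a faulting load still reports the number of
 * uncopied bytes in len, but the destination tail is not zero-filled; see
 * the bnez t7, 2f in l_exc below.  Callers (e.g. the inatomic user-copy
 * helpers) are expected to cope with the unwritten tail themselves.
 */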
/*
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
 */
LEAF(memcpy)					/* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy)
	move	v0, dst				/* return value */
__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
	li	t7, 0				/* not inatomic */
__copy_user_common:
	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
	#
	# Octeon doesn't care if the destination is unaligned. The hardware
	# can fix it faster than we can special case the assembly.
	#
	pref	0, 0(src)
	sltu	t0, len, NBYTES		# Check if < 1 word
	bnez	t0, copy_bytes_checklen
	 and	t0, src, ADDRMASK	# Check if src unaligned
	bnez	t0, src_unaligned
	 sltu	t0, len, 4*NBYTES	# Check if < 4 words
	bnez	t0, less_than_4units
	 sltu	t0, len, 8*NBYTES	# Check if < 8 words
	bnez	t0, less_than_8units
	 sltu	t0, len, 16*NBYTES	# Check if < 16 words
	bnez	t0, cleanup_both_aligned
	 sltu	t0, len, 128+1		# Check if len < 129
	bnez	t0, 1f			# Skip prefetch if len is too short
	 sltu	t0, len, 256+1		# Check if len < 257
	bnez	t0, 1f			# Skip prefetch if len is too short
	 pref	0, 128(src)		# We must not prefetch invalid addresses
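	#
	# The length checks above keep the prefetches legal: both the
	# pref 0, 128(src) here and the pref 0, 256(src) in the loop are
	# reached only while len >= 257, so the prefetched address always
	# lies inside the source buffer.
	#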
	#
	# This is where we loop if there are more than 128 bytes left
2:	pref	0, 256(src)		# We must not prefetch invalid addresses
	#
	# This is where we loop if we can't prefetch anymore
1:
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 16*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p16u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p15u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p14u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p13u)
EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p12u)
EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p11u)
EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p10u)
	ADD	src, src, 16*NBYTES
EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p9u)
	ADD	dst, dst, 16*NBYTES
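	#
	# src/dst are advanced here, halfway through the 16-unit block, so
	# the ADDs overlap with the memory traffic and the remaining eight
	# units are addressed with negative UNIT() offsets below.
	#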
EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy_rewind16)
EXC(	STORE	t0, UNIT(-8)(dst),	s_exc_p8u)
EXC(	STORE	t1, UNIT(-7)(dst),	s_exc_p7u)
EXC(	STORE	t2, UNIT(-6)(dst),	s_exc_p6u)
EXC(	STORE	t3, UNIT(-5)(dst),	s_exc_p5u)
EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy_rewind16)
EXC(	STORE	t0, UNIT(-4)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(-3)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(-2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(-1)(dst),	s_exc_p1u)
	sltu	t0, len, 256+1		# See if we can prefetch more
	beqz	t0, 2b
	 sltu	t0, len, 128		# See if we can loop again
	beqz	t0, 1b
	 nop
	#
	# Jump here if there are less than 16*NBYTES left.
	#
cleanup_both_aligned:
	beqz	len, done
	 sltu	t0, len, 8*NBYTES
	bnez	t0, less_than_8units
	 nop
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 8*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p8u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p7u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p6u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p5u)
EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p1u)
	ADD	src, src, 8*NBYTES
	beqz	len, done
	 ADD	dst, dst, 8*NBYTES
	#
	# Jump here if there are less than 8*NBYTES left.
	#
less_than_8units:
	sltu	t0, len, 4*NBYTES
	bnez	t0, less_than_4units
	 nop
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
	ADD	src, src, 4*NBYTES
	beqz	len, done
	 ADD	dst, dst, 4*NBYTES
	#
	# Jump here if there are less than 4*NBYTES left. This means
	# we may need to copy up to 3 NBYTES words.
	#
less_than_4units:
	sltu	t0, len, 1*NBYTES
	bnez	t0, copy_bytes_checklen
	 nop
	#
	# 1) Copy NBYTES, then check length again
	#
EXC(	LOAD	t0, 0(src),		l_exc)
	SUB	len, len, NBYTES
	sltu	t1, len, 8
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	ADD	src, src, NBYTES
	bnez	t1, copy_bytes_checklen
	 ADD	dst, dst, NBYTES
	#
	# 2) Copy NBYTES, then check length again
	#
EXC(	LOAD	t0, 0(src),		l_exc)
	SUB	len, len, NBYTES
	sltu	t1, len, 8
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	ADD	src, src, NBYTES
	bnez	t1, copy_bytes_checklen
	 ADD	dst, dst, NBYTES
	#
	# 3) Copy NBYTES, then check length again
	#
EXC(	LOAD	t0, 0(src),		l_exc)
	SUB	len, len, NBYTES
	ADD	src, src, NBYTES
	ADD	dst, dst, NBYTES
	b	copy_bytes_checklen
EXC(	 STORE	t0, -8(dst),		s_exc_p1u)
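	#
	# Note that the final store sits in the branch delay slot; src and
	# dst have already been advanced, hence the -8 (i.e. -NBYTES on the
	# 64-bit kernel) offset back into the word just loaded.
	#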
src_unaligned:
	SRL	t0, len, LOG_NBYTES+2	# +2 for 4 units/iter
	beqz	t0, cleanup_src_unaligned
	 and	rem, len, (4*NBYTES-1)	# rem = len % 4*NBYTES
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
EXC(	LDFIRST	t0, FIRST(0)(src),	l_exc)
EXC(	LDFIRST	t1, FIRST(1)(src),	l_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
EXC(	LDREST	t1, REST(1)(src),	l_exc_copy)
EXC(	LDFIRST	t2, FIRST(2)(src),	l_exc_copy)
EXC(	LDFIRST	t3, FIRST(3)(src),	l_exc_copy)
EXC(	LDREST	t2, REST(2)(src),	l_exc_copy)
EXC(	LDREST	t3, REST(3)(src),	l_exc_copy)
	ADD	src, src, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
	bne	len, rem, 1b
	 ADD	dst, dst, 4*NBYTES
cleanup_src_unaligned:
	beqz	len, done
	 and	rem, len, NBYTES-1	# rem = len % NBYTES
	beq	rem, len, copy_bytes
	 nop
1:
EXC(	LDFIRST	t0, FIRST(0)(src),	l_exc)
EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	ADD	src, src, NBYTES
	bne	len, rem, 1b
	 ADD	dst, dst, NBYTES
copy_bytes_checklen:
	beqz	len, done
	 nop
copy_bytes:
	/* 0 < len < NBYTES */
#define COPY_BYTE(N)			\
EXC(	lb	t0, N(src), l_exc);	\
	SUB	len, len, 1;		\
	beqz	len, done;		\
EXC(	 sb	t0, N(dst), s_exc_p1)
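	/*
	 * COPY_BYTE(0), for example, expands to a fault-protected lb/sb
	 * pair that copies one byte, decrements len, and bails out through
	 * done as soon as len hits zero (the sb executes in the branch
	 * delay slot).
	 */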
	COPY_BYTE(0)
	COPY_BYTE(1)
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
EXC(	lb	t0, NBYTES-2(src), l_exc)
	SUB	len, len, 1
	jr	ra
EXC(	 sb	t0, NBYTES-2(dst), s_exc_p1)
done:
	jr	ra
	 nop
	END(memcpy)
l_exc_copy_rewind16:
	/* Rewind src and dst by 16*NBYTES for l_exc_copy */
	SUB	src, src, 16*NBYTES
	SUB	dst, dst, 16*NBYTES
l_exc_copy:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * faulting load develops); when that happens, fall through to
	 * l_exc below.
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOAD	t0, TI_TASK($28)
	LOAD	t0, THREAD_BUADDR(t0)
1:
EXC(	lb	t1, 0(src), l_exc)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	bne	src, t0, 1b
	 ADD	dst, dst, 1
l_exc:
	LOAD	t0, TI_TASK($28)
	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	SUB	len, AT, t0		# len = number of uncopied bytes
	bnez	t7, 2f			/* Skip the zeroing out part if inatomic */
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 * see (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear.
	 */
	ADD	dst, t0			# compute start address in a1
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	beqz	len, done
	 SUB	src, len, 1
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
	bnez	src, 1b
	 SUB	src, src, 1
2:	jr	ra
	 nop
#define SEXC(n)				\
s_exc_p ## n ## u:			\
	ADD	len, len, n*NBYTES;	\
	jr	ra;			\
	 nop
LEAF(memmove)
EXPORT_SYMBOL(memmove)
	ADD	t0, a0, a2
	ADD	t1, a1, a2
	sltu	t0, a1, t0			# dst + len <= src -> memcpy
	sltu	t1, a0, t1			# dst >= src + len -> memcpy
	and	t0, t1
	beqz	t0, __memcpy
	 move	v0, a0				/* return value */
	beqz	a2, r_out
	END(memmove)
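/*
 * The equivalent decision in C (an illustrative sketch only, not the
 * kernel's memmove; a0/dst, a1/src, a2/len match the register comments
 * above, and rmemcpy_sketch() stands in for __rmemcpy):
 *
 *	void *memmove_sketch(void *dst, const void *src, size_t len)
 *	{
 *		// The ranges overlap iff
 *		// src < dst + len  &&  dst < src + len.
 *		if (!((uintptr_t)src < (uintptr_t)dst + len &&
 *		      (uintptr_t)dst < (uintptr_t)src + len))
 *			return memcpy(dst, src, len);	// disjoint: fast path
 *		return rmemcpy_sketch(dst, src, len);	// overlap: byte copy
 *	}
 *
 * __rmemcpy then picks a forward or backward byte copy depending on
 * whether src >= dst.
 */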
	/* fall through to __rmemcpy */
LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
	sltu	t0, a1, a0
	beqz	t0, r_end_bytes_up		# src >= dst
	 nop
	ADD	a0, a2				# dst = dst + len
	ADD	a1, a2				# src = src + len

r_end_bytes:
	lb	t0, -1(a1)
	SUB	a2, a2, 0x1
	sb	t0, -1(a0)
	SUB	a1, a1, 0x1
	bnez	a2, r_end_bytes
	 SUB	a0, a0, 0x1

r_out:
	jr	ra
	 move	a2, zero

r_end_bytes_up:
	lb	t0, 0(a1)
	SUB	a2, a2, 0x1
	sb	t0, 0(a0)
	ADD	a1, a1, 0x1
	bnez	a2, r_end_bytes_up
	 ADD	a0, a0, 0x1

	jr	ra
	 move	a2, zero
	END(__rmemcpy)