 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Quick'n'dirty IP checksum ...
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014 Imagination Technologies Ltd.
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>
 * As we are sharing code with the mips32 tree (which uses the o32 ABI
 * register definitions), we need to redefine the register definitions from
 * the n64 ABI register naming to the o32 ABI register naming.
#endif /* USE_DOUBLE */

#define UNIT(unit)  ((unit)*NBYTES)

#define ADDC(sum,reg)						\
#define ADDC32(sum,reg)						\
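/*
 * ADDC folds the carry out of each addition back into the running sum
 * (end-around carry), which is what keeps the accumulator a valid
 * one's-complement sum.  A minimal C sketch of the intended semantics
 * (csum_addc is a hypothetical name, not part of the kernel API):
 *
 *	static inline unsigned long csum_addc(unsigned long sum,
 *					      unsigned long v)
 *	{
 *		sum += v;		// may wrap around
 *		if (sum < v)		// unsigned wrap <=> carry out
 *			sum += 1;	// fold the carry back in
 *		return sum;
 *	}
 */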
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
	LOAD	_t0, (offset + UNIT(0))(src);			\
	LOAD	_t1, (offset + UNIT(1))(src);			\
	LOAD	_t2, (offset + UNIT(2))(src);			\
	LOAD	_t3, (offset + UNIT(3))(src);			\
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);	\
	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
 * a1: length of the area to checksum
 * a2: partial checksum
EXPORT_SYMBOL(csum_partial)
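/*
 * For reference, csum_partial can be read as a one's-complement
 * accumulation over the buffer, seeded with the caller's partial sum.
 * A hedged C sketch (hypothetical helper; it assumes an even start
 * address and shows the little-endian word assembly, both of which the
 * assembly below handles explicitly for every alignment):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t csum_partial_ref(const uint8_t *buf, int len,
 *					 uint32_t partial)
 *	{
 *		uint64_t sum = partial;
 *
 *		for (; len > 1; len -= 2, buf += 2)	// 16-bit chunks
 *			sum += buf[0] | (buf[1] << 8);	// little-endian
 *		if (len)				// trailing byte
 *			sum += buf[0];
 *		while (sum >> 32)			// fold carries
 *			sum = (sum & 0xffffffffu) + (sum >> 32);
 *		return (uint32_t)sum;
 *	}
 */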
	bnez	t8, .Lsmall_csumcpy		/* < 8 bytes to copy */
	andi	t7, src, 0x1			/* odd buffer? */
	beqz	t7, .Lword_align
	LONG_SUBU	a1, a1, 0x1
	PTR_ADDU	src, src, 0x1
	beqz	t8, .Ldword_align
	LONG_SUBU	a1, a1, 0x2
	PTR_ADDU	src, src, 0x2
	bnez	t8, .Ldo_end_words
	beqz	t8, .Lqword_align
	LONG_SUBU	a1, a1, 0x4
	PTR_ADDU	src, src, 0x4
	beqz	t8, .Loword_align
	LONG_SUBU	a1, a1, 0x8
	LONG_SUBU	a1, a1, 0x8
	PTR_ADDU	src, src, 0x8
	beqz	t8, .Lbegin_movement
	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
	LONG_SUBU	a1, a1, 0x10
	PTR_ADDU	src, src, 0x10
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
	LONG_SUBU	t8, t8, 0x01
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x80
	bnez	t8, .Lmove_128bytes
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x40
	beqz	t2, .Ldo_end_words
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x20
	beqz	t8, .Lsmall_csumcpy
	LONG_SUBU	t8, t8, 0x1
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x4
	/* unknown src alignment and < 8 bytes to go */
	/* Still a full word to go */
	dsll	t1, t1, 32		/* clear lower 32 bits */
	/* Still a halfword to go */
	/* odd buffer alignment? */
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
    defined(CONFIG_CPU_LOONGSON64)
	beqz	t7, 1f			/* odd buffer alignment? */
	/* Add the passed partial csum. */
 * checksum and copy routines based on memcpy.S
 *
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_kernel(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *	not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
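/*
 * In C terms, invariants (1)-(3) are exactly what the fault fixups
 * below need: AT and the faulting address determine how many bytes
 * were never loaded, and dst can be resynchronized from src.  A sketch
 * with hypothetical names (see the exception handlers below for the
 * real code):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static void fixup_state(uintptr_t at, uintptr_t fault_addr,
 *				uintptr_t src, uintptr_t *dst,
 *				size_t *uncopied)
 *	{
 *		*uncopied = at - fault_addr;	// from (1) and (2)
 *		*dst += fault_addr - src;	// (3): dst tracks src
 *	}
 */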
/* Instruction type */
#define LEGACY_MODE 1

/*
 * Wrapper to add an entry in the exception table
 * in case the insn causes a memory exception.
 *
 * insn    : Load/store instruction
 * type    : Instruction type
 * handler : Exception handler
 */
#define EXC(insn, type, reg, addr, handler)			\
	.if \mode == LEGACY_MODE;				\
	.section __ex_table,"a";				\
	/* This is enabled in EVA mode */			\
	/* If loading from user or storing to user */		\
	.if ((\from == USEROP) && (type == LD_INSN)) ||		\
	    ((\to == USEROP) && (type == ST_INSN));		\
9:		__BUILD_EVA_INSN(insn##e, reg, addr);		\
	.section __ex_table,"a";				\
	/* EVA without exception */				\
#define LOADK	ld /* No exception */
#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
#define LOADK	lw /* No exception */
#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
#endif /* USE_DOUBLE */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)

#define ADDRMASK (NBYTES-1)
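/*
 * LDFIRST/LDREST pair up as lwl/lwr (ldl/ldr for USE_DOUBLE) so an
 * unaligned word is assembled from two accesses that never stray past
 * the word holding its last byte.  A rough C model of the net effect,
 * big-endian case shown (illustrative only; the hardware merges the
 * bytes within a single register):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t load_unaligned_be(const uint8_t *p)
 *	{
 *		uintptr_t a = (uintptr_t)p & ~(uintptr_t)3; // aligned base
 *		unsigned off = (uintptr_t)p & 3;	    // byte offset
 *		uint32_t lo = *(const uint32_t *)a;
 *
 *		if (off == 0)		// already aligned, one access
 *			return lo;
 *		uint32_t hi = *(const uint32_t *)(a + 4);
 *		return (lo << (8 * off)) | (hi >> (8 * (4 - off)));
 *	}
 */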
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
	PTR_ADDU	AT, src, len	/* See (1) above. */
	/* initialize __nocheck if this is the first time we execute this
FEXPORT(csum_partial_copy_nocheck)
EXPORT_SYMBOL(csum_partial_copy_nocheck)
 * Note: dst & src may be unaligned, len may be 0
 *
 * The "issue break"s below are very approximate.
 * Issue delays for dcache fills will perturb the schedule, as will
 * load queue full replay traps, etc.
 *
 * If len < NBYTES use byte operations.
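/*
 * The entry checks below can be read as the following dispatch (a
 * sketch with hypothetical names; NBYTES is 4 or 8 depending on
 * USE_DOUBLE, 8 assumed here):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	enum path { COPY_BYTES, DST_UNALIGNED,
 *		    SRC_UNALIGNED_DST_ALIGNED, BOTH_ALIGNED };
 *
 *	static enum path choose_path(uintptr_t src, uintptr_t dst,
 *				     size_t len)
 *	{
 *		const uintptr_t mask = 8 - 1;	// ADDRMASK
 *
 *		if (len < 8)			// tiny copy
 *			return COPY_BYTES;
 *		if (dst & mask)			// align dst first
 *			return DST_UNALIGNED;
 *		if (src & mask)
 *			return SRC_UNALIGNED_DST_ALIGNED;
 *		return BOTH_ALIGNED;
 *	}
 */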
	and	t1, dst, ADDRMASK
	bnez	t2, .Lcopy_bytes_checklen\@
	and	t0, src, ADDRMASK
	andi	odd, dst, 0x1			/* odd buffer? */
	bnez	t1, .Ldst_unaligned\@
	bnez	t0, .Lsrc_unaligned_dst_aligned\@
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
	SRL	t0, len, LOG_NBYTES+3		# +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned\@	# len < 8*NBYTES
	SUB	len, 8*NBYTES			# subtract here for bgez loop
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
	LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
	LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
	LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
	SUB	len, len, 8*NBYTES
	ADD	src, src, 8*NBYTES
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	STORE(t4, UNIT(4)(dst), .Ls_exc\@)
	STORE(t5, UNIT(5)(dst), .Ls_exc\@)
	STORE(t6, UNIT(6)(dst), .Ls_exc\@)
	STORE(t7, UNIT(7)(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 8*NBYTES
	ADD	len, 8*NBYTES			# revert len (see above)
	/*
	 * len == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned\@:
	sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units\@
	and	rem, len, (NBYTES-1)		# rem = len % NBYTES
	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
.Lless_than_4units\@:
	beq	rem, len, .Lcopy_bytes\@
	LOAD(t0, 0(src), .Ll_exc\@)
	STORE(t0, 0(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
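/*
 * Conceptually, the partial-store tail below keeps the first rem bytes
 * of the loaded word for both the store and the checksum without ever
 * reading dst: shift the unwanted bytes out, STREST the survivors,
 * then shift back so the kept bytes return to their checksum
 * positions.  Little-endian case in C (hypothetical helper; the
 * big-endian build swaps the shift directions):
 *
 *	#include <stdint.h>
 *
 *	static uint64_t tail_word(uint64_t t0, unsigned rem_bytes)
 *	{
 *		unsigned discard = 64 - 8 * rem_bytes;	// rem_bytes in 1..7
 *
 *		t0 <<= discard;		// SHIFT_DISCARD (SLLV on LE)
 *		//			   ... STREST stores here ...
 *		t0 >>= discard;		// SHIFT_DISCARD_REVERT (SRLV)
 *		return t0;		// only the kept bytes remain
 *	}
 */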
	ADD	t1, dst, len			# t1 is just past last byte of dst
	SLL	rem, len, 3			# rem = number of bits to keep
	LOAD(t0, 0(src), .Ll_exc\@)
	SUB	bits, bits, rem			# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
	STREST(t0, -1(t1), .Ls_exc\@)
	SHIFT_DISCARD_REVERT t0, t0, bits
	/*
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; t1 > 0
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
	SUB	t2, t2, t1			# t2 = number of bytes copied
	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
	SLL	t4, t1, 3			# t4 = number of bits to discard
	SHIFT_DISCARD t3, t3, t4
	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
	beq	len, t2, .Ldone\@
	beqz	match, .Lboth_aligned\@
.Lsrc_unaligned_dst_aligned\@:
	SRL	t0, len, LOG_NBYTES+2		# +2 for 4 units/iter
	beqz	t0, .Lcleanup_src_unaligned\@
	and	rem, len, (4*NBYTES-1)		# rem = len % 4*NBYTES
	/*
	 * Avoid consecutive LD*'s to the same register since some mips
	 * implementations can't issue them in the same cycle.
	 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
	 * are to the same unit (unless src is aligned, but it's not).
	 */
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
	SUB	len, len, 4*NBYTES
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop					# improves slotting
#endif
	STORE(t0, UNIT(0)(dst), .Ls_exc\@)
	STORE(t1, UNIT(1)(dst), .Ls_exc\@)
	STORE(t2, UNIT(2)(dst), .Ls_exc\@)
	STORE(t3, UNIT(3)(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
.Lcleanup_src_unaligned\@:
	and	rem, len, NBYTES-1		# rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes\@
	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
	STORE(t0, 0(dst), .Ls_exc\@)
	.set	reorder				/* DADDI_WAR */
.Lcopy_bytes_checklen\@:
	/* 0 < len < NBYTES */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_INC 8
#else
#define SHIFT_START 8*(NBYTES-1)
#define SHIFT_INC -8
#endif
	move	t2, zero			# partial word
	li	t3, SHIFT_START			# shift
	/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)				\
	LOADBU(t0, N(src), .Ll_exc_copy\@);	\
	STOREB(t0, N(dst), .Ls_exc\@);		\
	addu	t3, SHIFT_INC;			\
	beqz	len, .Lcopy_bytes_done\@;	\
	LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
	STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
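/*
 * The COPY_BYTE tail packs each copied byte into a partial word at a
 * moving shift position so a single ADDC can absorb the whole tail.
 * Little-endian direction shown (SHIFT_START = 0, SHIFT_INC = 8); a
 * hedged C model with a hypothetical name:
 *
 *	#include <stdint.h>
 *
 *	static uint64_t pack_tail_bytes(const uint8_t *src, unsigned n)
 *	{
 *		uint64_t word = 0;
 *		unsigned shift = 0;			// SHIFT_START
 *
 *		while (n--) {
 *			word |= (uint64_t)*src++ << shift;
 *			shift += 8;			// SHIFT_INC
 *		}
 *		return word;		// subsequently ADDC(sum, word)
 *	}
 */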
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
    defined(CONFIG_CPU_LOONGSON64)
	beqz	odd, 1f			/* odd buffer alignment? */
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
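/*
 * In C, the recovery loop below amounts to (hypothetical helper; the
 * final byte load may itself fault, which the exception table entry on
 * the LOADBU tolerates):
 *
 *	#include <stdint.h>
 *
 *	static void copy_until_fault(const uint8_t *src, uint8_t *dst,
 *				     const uint8_t *bad_addr)
 *	{
 *		while (src < bad_addr)	// bad_addr = THREAD_BUADDR($28)
 *			*dst++ = *src++;	// stores cannot fault here
 *	}
 */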
	LOADK	t0, TI_TASK($28)
	LOADK	t0, THREAD_BUADDR(t0)
	LOADBU(t1, 0(src), .Ll_exc\@)
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	.set	reorder				/* DADDI_WAR */
	LOADK	t0, TI_TASK($28)
	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	SUB	len, AT, t0		# len = number of uncopied bytes
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 * See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	ADD	dst, t0			# compute start address in a1
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder				/* DADDI_WAR */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	li	v0, -1			/* invalid checksum */
LEAF(__csum_partial_copy_kernel)
EXPORT_SYMBOL(__csum_partial_copy_kernel)
FEXPORT(__csum_partial_copy_to_user)
EXPORT_SYMBOL(__csum_partial_copy_to_user)
FEXPORT(__csum_partial_copy_from_user)
EXPORT_SYMBOL(__csum_partial_copy_from_user)
	__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
END(__csum_partial_copy_kernel)

LEAF(__csum_partial_copy_to_user)
	__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
END(__csum_partial_copy_to_user)

LEAF(__csum_partial_copy_from_user)
	__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
END(__csum_partial_copy_from_user)
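/*
 * For reference, the C-level signatures these entry points back (a
 * sketch based on the argument lists documented above; the kernel's
 * own headers use __wsum rather than the plain unsigned types shown):
 *
 *	unsigned int csum_partial(const void *buf, int len,
 *				  unsigned int sum);
 *	unsigned int csum_partial_copy_nocheck(const void *src, void *dst,
 *					       int len, unsigned int sum);
 *	unsigned int __csum_partial_copy_kernel(const void *src, void *dst,
 *						int len, unsigned int sum,
 *						int *errp);
 */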