1 #include <linux/linkage.h>
2 #include <asm-generic/export.h>
6 .macro fixup op reg addr lbl
9 .section __ex_table,"a"
/*
 * __asm_copy_to_user / __asm_copy_from_user — one shared body serves both
 * entry points (copy between kernel and user memory).
 *
 * In:  a0 = dst, a1 = src, a2 = byte count (a2's role inferred from the
 *      size check `bltu a2, a3` below — confirm against the C prototype).
 * Out: presumably the number of uncopied bytes, per the usual
 *      copy_{to,from}_user contract — the return sequence is not visible
 *      in this excerpt; verify in the full file.
 *
 * Every user-memory access goes through the `fixup` macro, which emits a
 * __ex_table entry (see the ".section __ex_table" above) redirecting a
 * faulting access to local label 10 (the shared fault handler, outside
 * this excerpt).
 *
 * NOTE(review): this view is non-contiguous — the CSR user-access
 * enable/disable instructions referenced by the comments, plus several
 * labels and branches, lie in the elided lines.
 */
15 ENTRY(__asm_copy_to_user)
16 ENTRY(__asm_copy_from_user)
18 /* Enable access to user memory */
22 /* Save for return value */
26 * Register allocation for code below:
27 * a0 - start of uncopied dst
28 * a1 - start of uncopied src
30 * t0 - end of uncopied dst
35 * Use byte copy only if too small.
36 * SZREG holds 4 for RV32 and 8 for RV64
/*
 * Threshold: the unrolled word-copy loop consumes 8*SZREG per iteration
 * (plus alignment prologue), so anything under 9*SZREG goes straight to
 * the byte-at-a-time tail loop.
 */
38 li a3, 9*SZREG /* size must be larger than size in word_copy */
39 bltu a2, a3, .Lbyte_copy_tail
42 * Copy first bytes until dst is aligned to word boundary.
44 * t1 - start of aligned dst
/* Round t1 down to a SZREG boundary: first word-aligned dst address. */
47 andi t1, t1, ~(SZREG-1)
48 /* dst is already aligned, skip */
49 beq a0, t1, .Lskip_align_dst
51 /* a5 - one byte for copying data */
/* Byte loop: advance a0/a1 one byte at a time until a0 hits t1. */
52 fixup lb a5, 0(a1), 10f
53 addi a1, a1, 1 /* src */
54 fixup sb a5, 0(a0), 10f
55 addi a0, a0, 1 /* dst */
56 bltu a0, t1, 1b /* t1 - start of aligned dst */
61 * Use shift-copy if src is misaligned.
62 * Use word-copy if both src and dst are aligned because
63 * can not use shift-copy which do not require shifting
65 /* a1 - start of src */
71 * Both src and dst are aligned, unrolled word copy
73 * a0 - start of aligned dst
74 * a1 - start of aligned src
75 * t0 - end of aligned dst
/*
 * Pull the end pointer back by one full unroll so the loop condition
 * (not visible here) cannot read/write past the buffer; restored below.
 */
77 addi t0, t0, -(8*SZREG) /* not to over run */
/*
 * 8-way unrolled copy: load 8 words into a4-a7/t1-t4, then store all 8.
 * Separating the loads from the stores keeps each load's result away
 * from its use; every access has a fault fixup to label 10.
 */
79 fixup REG_L a4, 0(a1), 10f
80 fixup REG_L a5, SZREG(a1), 10f
81 fixup REG_L a6, 2*SZREG(a1), 10f
82 fixup REG_L a7, 3*SZREG(a1), 10f
83 fixup REG_L t1, 4*SZREG(a1), 10f
84 fixup REG_L t2, 5*SZREG(a1), 10f
85 fixup REG_L t3, 6*SZREG(a1), 10f
86 fixup REG_L t4, 7*SZREG(a1), 10f
87 fixup REG_S a4, 0(a0), 10f
88 fixup REG_S a5, SZREG(a0), 10f
89 fixup REG_S a6, 2*SZREG(a0), 10f
90 fixup REG_S a7, 3*SZREG(a0), 10f
91 fixup REG_S t1, 4*SZREG(a0), 10f
92 fixup REG_S t2, 5*SZREG(a0), 10f
93 fixup REG_S t3, 6*SZREG(a0), 10f
94 fixup REG_S t4, 7*SZREG(a0), 10f
/* Undo the -(8*SZREG) adjustment so t0 is the true end of dst again. */
99 addi t0, t0, 8*SZREG /* revert to original value */
105 * Word copy with shifting.
106 * For misaligned copy we still perform aligned word copy, but
107 * we need to use the value fetched from the previous iteration and
109 * This is safe because reading is less than a word size.
111 * a0 - start of aligned dst
113 * a3 - a1 & mask:(SZREG-1)
114 * t0 - end of uncopied dst
115 * t1 - end of aligned dst
117 /* calculating aligned word boundary for dst */
118 andi t1, t0, ~(SZREG-1)
119 /* Converting unaligned src to aligned src */
/* Round src DOWN to alignment; a3 (src & (SZREG-1)) records the skew. */
120 andi a1, a1, ~(SZREG-1)
/* t3 = skew in bits — shift amount for splicing adjacent words. */
127 slli t3, a3, 3 /* converting bytes in a3 to bits */
131 /* Load the first word to combine with second word */
132 fixup REG_L a5, 0(a1), 10f
135 /* Main shifting copy
137 * a0 - start of aligned dst
138 * a1 - start of aligned src
139 * t1 - end of aligned dst
142 /* At least one iteration will be executed */
/*
 * Each iteration fetches the next aligned src word and (in elided code)
 * splices it with the previous word by the bit skew before storing.
 */
144 fixup REG_L a5, SZREG(a1), 10f
148 fixup REG_S a2, 0(a0), 10f
152 /* Revert src to original unaligned value */
157 * Byte copy anything left.
159 * a0 - start of remaining dst
160 * a1 - start of remaining src
161 * t0 - end of remaining dst
/* Tail loop: finish the (< SZREG or small) remainder one byte at a time. */
163 bgeu a0, t0, .Lout_copy_user /* check if end of copy */
165 fixup lb a5, 0(a1), 10f
166 addi a1, a1, 1 /* src */
167 fixup sb a5, 0(a0), 10f
168 addi a0, a0, 1 /* dst */
169 bltu a0, t0, 4b /* t0 - end of dst */
172 /* Disable access to user memory */
176 ENDPROC(__asm_copy_to_user)
177 ENDPROC(__asm_copy_from_user)
178 EXPORT_SYMBOL(__asm_copy_to_user)
179 EXPORT_SYMBOL(__asm_copy_from_user)
/*
 * __clear_user (tail of the routine — its ENTRY line and prologue fall
 * in an elided span of this excerpt): zero a range of user memory.
 *
 * In:  a0 = start of target region; a3/t0/t1 derived bounds as the
 *      register comments below state. The byte-count argument and the
 *      computation of a3/t0 are not visible here — confirm in the full
 *      file.
 *
 * Strategy visible below: word-wide zero stores (`REG_S zero`) over the
 * aligned middle, byte stores (`sb zero`) for the unaligned head and the
 * remainder. Faulting stores are redirected to local label 11 (the
 * __clear_user fault handler, outside this excerpt) via the `fixup`
 * macro's __ex_table entries.
 */
184 /* Enable access to user memory */
/* t1 = highest aligned address, t0 = lowest aligned address in region. */
190 andi t1, a3, ~(SZREG-1)
191 andi t0, t0, ~(SZREG-1)
193 * a3: terminal address of target region
194 * t0: lowest doubleword-aligned address in target region
195 * t1: highest doubleword-aligned address in target region
/* Aligned body: store a full word of zeros per iteration. */
200 fixup REG_S, zero, (a0), 11f
207 /* Disable access to user memory */
211 4: /* Edge case: unalignment */
/* Unaligned head: zero one byte at a time until a0 is word-aligned. */
212 fixup sb, zero, (a0), 11f
216 5: /* Edge case: remainder */
/* Sub-word tail: zero the final bytes past the last aligned word. */
217 fixup sb, zero, (a0), 11f
221 ENDPROC(__clear_user)
222 EXPORT_SYMBOL(__clear_user)
226 /* Fixup code for __copy_user(10) and __clear_user(11) */
228 /* Disable access to user memory */