1 /* copy_user.S: Sparc optimized copy_from_user and copy_to_user code.
3 * Copyright(C) 1995 Linus Torvalds
4 * Copyright(C) 1996 David S. Miller
5 * Copyright(C) 1996 Eddie C. Dost
6 * Copyright(C) 1996,1998 Jakub Jelinek
9 * e-mail between David and Eddie.
11 * Returns 0 if successful, otherwise count of bytes not copied yet
14 #include <asm/ptrace.h>
15 #include <asm/asmmacro.h>
17 #include <asm/thread_info.h>
18 #include <asm/export.h>
20 /* Work around cpp -rob */
22 #define EXECINSTR #execinstr
25 .section .fixup,ALLOC,EXECINSTR; \
29 .section __ex_table,ALLOC; \
35 #define EX2(x,y,c,d,e,a,b) \
37 .section .fixup,ALLOC,EXECINSTR; \
42 .section __ex_table,ALLOC; \
50 .section __ex_table,ALLOC; \
56 #define EXT(start,end,handler) \
57 .section __ex_table,ALLOC; \
59 .word start, 0, end, handler; \
63 /* Please do not change following macros unless you change logic used
64 * in .fixup at the end of this file as well
67 /* Both these macros have to start with exactly the same insn */
/* Copy 32 bytes from %src+(offset) to %dst+(offset).
 * Loads are 64-bit ldd into even/odd register pairs (t0/t1, t2/t3,
 * t4/t5, t6/t7); stores are 32-bit st, so the destination only needs
 * word alignment.  NOTE(review): ldd traps on a non-doubleword-aligned
 * address on SPARC V8, so %src+(offset) is presumably 8-byte aligned
 * when this expands -- confirm against the (elided) alignment code.
 * Per the warning above: the .fixup handlers decode which instruction
 * inside this expansion faulted, so the instruction count and order
 * here must not change independently of .fixup.
 */
68 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
69 ldd [%src + (offset) + 0x00], %t0; \
70 ldd [%src + (offset) + 0x08], %t2; \
71 ldd [%src + (offset) + 0x10], %t4; \
72 ldd [%src + (offset) + 0x18], %t6; \
73 st %t0, [%dst + (offset) + 0x00]; \
74 st %t1, [%dst + (offset) + 0x04]; \
75 st %t2, [%dst + (offset) + 0x08]; \
76 st %t3, [%dst + (offset) + 0x0c]; \
77 st %t4, [%dst + (offset) + 0x10]; \
78 st %t5, [%dst + (offset) + 0x14]; \
79 st %t6, [%dst + (offset) + 0x18]; \
80 st %t7, [%dst + (offset) + 0x1c];
/* Copy 32 bytes from %src+(offset) to %dst+(offset) using doubleword
 * accesses on BOTH sides (ldd + std into/from register pairs), i.e.
 * the fully-aligned variant of MOVE_BIGCHUNK: both pointers are
 * presumably 8-byte aligned here, since SPARC V8 ldd/std trap on
 * misaligned addresses -- confirm against the (elided) dispatch code.
 * Per the warning above: .fixup decodes fault offsets into this
 * expansion, so do not reorder or resize it independently of .fixup.
 */
82 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
83 ldd [%src + (offset) + 0x00], %t0; \
84 ldd [%src + (offset) + 0x08], %t2; \
85 ldd [%src + (offset) + 0x10], %t4; \
86 ldd [%src + (offset) + 0x18], %t6; \
87 std %t0, [%dst + (offset) + 0x00]; \
88 std %t2, [%dst + (offset) + 0x08]; \
89 std %t4, [%dst + (offset) + 0x10]; \
90 std %t6, [%dst + (offset) + 0x18];
/* Copy the 16 bytes at [%src - (offset) - 0x10, %src - (offset))
 * to the same range below %dst: descending addressing, doubleword
 * loads, word stores.  Back-to-back expansions of this macro form
 * copy_user_table, which the code enters part-way through via
 * jmpl (a computed goto), so every expansion must assemble to the
 * same fixed number of instructions.
 */
92 #define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
93 ldd [%src - (offset) - 0x10], %t0; \
94 ldd [%src - (offset) - 0x08], %t2; \
95 st %t0, [%dst - (offset) - 0x10]; \
96 st %t1, [%dst - (offset) - 0x0c]; \
97 st %t2, [%dst - (offset) - 0x08]; \
98 st %t3, [%dst - (offset) - 0x04];
/* Copy 8 bytes from %src+(offset) to %dst+(offset) as four unsigned
 * halfword loads/stores -- only 2-byte alignment is required on
 * either side.  Used by the halfword-aligned copy path; same
 * fixed-size constraint as the other MOVE_* macros (.fixup decodes
 * fault positions inside the expansion).
 */
100 #define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
101 lduh [%src + (offset) + 0x00], %t0; \
102 lduh [%src + (offset) + 0x02], %t1; \
103 lduh [%src + (offset) + 0x04], %t2; \
104 lduh [%src + (offset) + 0x06], %t3; \
105 sth %t0, [%dst + (offset) + 0x00]; \
106 sth %t1, [%dst + (offset) + 0x02]; \
107 sth %t2, [%dst + (offset) + 0x04]; \
108 sth %t3, [%dst + (offset) + 0x06];
/* Copy the 2 bytes at [%src - (offset) - 2, %src - (offset)) to the
 * same range below %dst, one byte at a time -- no alignment needed.
 * Runs of this macro form both the unaligned byte-copy loop and the
 * short_table jump table, so each expansion must stay exactly four
 * instructions (the table is entered mid-run via jmpl).
 */
110 #define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
111 ldub [%src - (offset) - 0x02], %t0; \
112 ldub [%src - (offset) - 0x01], %t1; \
113 stb %t0, [%dst - (offset) - 0x02]; \
114 stb %t1, [%dst - (offset) - 0x01];
/* __copy_user: %o0 = dst, %o1 = src, %o2 = len.
 * Returns 0 on success, else the count of bytes not yet copied (per
 * the header comment).  Faults on user accesses between
 * __copy_user_begin and __copy_user_end are dispatched through
 * __ex_table to the .fixup handlers at the end of the file.
 * NOTE(review): this listing is heavily elided (original line numbers
 * jump); comments below describe only the visible instructions.
 */
119 .globl __copy_user_begin
123 EXPORT_SYMBOL(__copy_user)
/* Leading-alignment probes: EXO2 wraps each user-space access with an
 * exception-table entry (macro definition elided above). */
129 EXO2(ldub [%o1], %g2)
136 EXO2(lduh [%o1], %g2)
143 EXO2(lduh [%o1], %g2)
150 __copy_user: /* %o0=dst %o1=src %o2=len */
/* Small copies branch to the short/aligned tail code (unsigned
 * compare; the delay-slot instruction is elided here). */
158 bleu short_aligned_end
/* %g7 = bytes handled by the 128-byte unrolled loop below
 * (count rounded down to a multiple of 0x80). */
174 andcc %g1, 0xffffff80, %g7
/* Main loop body: four 32-byte chunks = 128 bytes per iteration.
 * ldd source loads, word stores to %o0. */
180 MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
181 MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
182 MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
183 MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
/* Dispatch into copy_user_table: skip when nothing in the 16..127
 * byte range remains, otherwise jump backwards into the run of
 * MOVE_LASTCHUNK expansions (computed goto -- the offset math lives
 * in elided lines between sethi and jmpl). */
192 be copy_user_table_end
195 sethi %hi(copy_user_table_end), %o5
200 jmpl %o5 + %lo(copy_user_table_end), %g0
/* copy_user_table: 7 entries x 16 bytes, highest offset first, so
 * entering N entries before the end copies the last N*16 bytes. */
204 MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
205 MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
206 MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
207 MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
208 MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
209 MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
210 MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
/* Fault anywhere in the table -> fixup 51. */
212 EXT(copy_user_table, copy_user_table_end, 51f)
/* Remaining 8..15 bytes: one doubleword load, two word stores.  The
 * third EX argument is the expression the fixup uses to compute the
 * bytes still uncopied if this access faults. */
216 EX(ldd [%o1], %g2, and %g1, 0xf)
219 EX(st %g2, [%o0 - 0x08], and %g1, 0xf)
220 EX2(st %g3, [%o0 - 0x04], and %g1, 0xf, %g1, sub %g1, 4)
/* Remaining word. */
225 EX(ld [%o1], %g2, and %g1, 7)
227 EX(st %g2, [%o0], and %g1, 7)
/* Remaining halfword. */
233 EX(lduh [%o1], %g2, and %g1, 3)
235 EX(sth %g2, [%o0], and %g1, 3)
/* Final odd byte. */
241 EX(ldub [%o1], %g2, add %g0, 1)
242 EX(stb %g2, [%o0], add %g0, 1)
/* ldd_std: fully-aligned fast path -- 128 bytes per iteration using
 * doubleword accesses on both source and destination.
 * NOTE(review): surrounding loop-control lines are elided. */
248 MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
249 MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
250 MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
251 MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
/* Faults in the aligned loop go to fixup 52. */
253 EXT(ldd_std, 81b, 52f)
/* Tail of the aligned path reuses the same computed-goto word-store
 * table (copy_user_table) as the main path. */
260 be copy_user_table_end
263 sethi %hi(copy_user_table_end), %o5
268 jmpl %o5 + %lo(copy_user_table_end), %g0
/* %o3 = count rounded down to a multiple of 16. */
276 and %o2, 0xfffffff0, %o3
282 EXO2(ldub [%o1], %g2)
286 andcc %o2, 0xfffffff0, %o3
/* Halfword-aligned path: 16 bytes per pass as 2-byte transfers. */
290 MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
291 MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
/* byte_chunk: fully unaligned path -- 16 bytes per pass as
 * single-byte copies (descending offsets within the pass). */
302 MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
303 MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
304 MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
305 MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
306 MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
307 MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
308 MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
309 MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
/* Faults in the byte loop go to fixup 54. */
311 EXT(byte_chunk, 83b, 54f)
/* short_table: computed-goto table copying the last 0..14 bytes two
 * at a time; entered part-way through via jmpl. */
320 sethi %hi(short_table_end), %o5
325 jmpl %o5 + %lo(short_table_end), %g0
328 MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
329 MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
330 MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
331 MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
332 MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
333 MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
334 MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
/* Faults in the short table go to fixup 55. */
336 EXT(84b, short_table_end, 55f)
/* Trailing odd byte. */
339 EX(ldub [%o1], %g2, add %g0, 1)
340 EX(stb %g2, [%o0], add %g0, 1)
/* Small word-aligned copy: two word loads, two word stores; if the
 * last store faults, the fixup reports %o2 - 4 bytes left.
 * TODO(review): exact length range handled here is in elided lines. */
352 EXO2(ld [%o1 + 0x00], %g2)
353 EXO2(ld [%o1 + 0x04], %g3)
355 EXO2(st %g2, [%o0 + 0x00])
356 EX(st %g3, [%o0 + 0x04], sub %o2, 4)
/* .fixup: fault handlers reached through __ex_table when a user
 * access above faults.  Each handler reconstructs how many bytes were
 * NOT copied (the function's return value, per the header comment)
 * from %g2, which the exception routine sets to
 * (faulting_insn - first_insn_of_covered_range) >> 2.
 * NOTE(review): most handler bodies are elided in this listing; the
 * block comments preserved below give the per-handler arithmetic. */
362 .section .fixup,#alloc,#execinstr
367 sethi %hi(PAGE_OFFSET), %g1
/* %g6 is presumably the current thread_info pointer on sparc32
 * (TI_PREEMPT comes from <asm/thread_info.h>) -- confirm. */
372 ld [%g6 + TI_PREEMPT], %g1
384 /* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
386 /* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
387 * happens. This is derived from the amount ldd reads, st stores, etc.
389 * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
390 * o0 += (g2 / 12) * 32;
/* Handler 60: fault inside copy_user_table (51f path);
 * %g3 starts from the sub-128 remainder of the count. */
411 60: and %g1, 0x7f, %g3
417 /* i = 41 - g2; j = i % 6;
418 * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
419 * o0 -= (i / 6) * 16 + 16;
439 /* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
440 o0 += (g2 / 8) * 32 */
452 /* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
466 /* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
467 o0 += (g2 / 4) * 2 */
481 g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
482 o0 -= i / 4 * 2 + 1 */
/* End marker of the fault-covered region (paired with
 * __copy_user_begin above). */
497 .globl __copy_user_end