/* checksum.S: Sparc optimized checksum code.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996 David S. Miller
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Linux/Alpha checksum c-code
 *	Linux/ix86 inline checksum assembly
 *	RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
 *	David Mosberger-Tang for optimized reference c-code
 *	BSD4.4 portable checksum routine
 */
#include <asm/errno.h>
#include <asm/export.h>
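
/* All of the routines below accumulate an RFC 1071 ones-complement sum in
 * a 32-bit register: addxcc adds each loaded word plus the carry from the
 * previous add, and a trailing addx wraps the final carry back into the
 * sum (the "end-around carry").  A rough C sketch of that per-word step,
 * purely for illustration (the helper name is made up, this is not a
 * kernel API):
 *
 *	static unsigned int csum_add32(unsigned int sum, unsigned int word)
 *	{
 *		sum += word;
 *		if (sum < word)		// carry out of bit 31...
 *			sum++;		// ...is wrapped back in (addx)
 *		return sum;
 *	}
 *
 * Callers fold the 32-bit accumulator down to the final 16-bit checksum
 * (high half added to low half, plus carry) outside of these routines.
 */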
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5)	\
	ldd	[buf + offset + 0x00], t0;			\
	ldd	[buf + offset + 0x08], t2;			\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;					\
	ldd	[buf + offset + 0x10], t4;			\
	addxcc	t2, sum, sum;					\
	addxcc	t3, sum, sum;					\
	ldd	[buf + offset + 0x18], t0;			\
	addxcc	t4, sum, sum;					\
	addxcc	t5, sum, sum;					\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;
#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3)	\
	ldd	[buf - offset - 0x08], t0;			\
	ldd	[buf - offset - 0x00], t2;			\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;					\
	addxcc	t2, sum, sum;					\
	addxcc	t3, sum, sum;
	/* Do end cruft out of band to get better cache patterns. */
csum_partial_end_cruft:
	be	1f				! caller asks %o1 & 0x8
	andcc	%o1, 4, %g0			! nope, check for word remaining
	ldd	[%o0], %g2			! load two
	addcc	%g2, %o2, %o2			! add first word to sum
	addxcc	%g3, %o2, %o2			! add second word as well
	add	%o0, 8, %o0			! advance buf ptr
	addx	%g0, %o2, %o2			! add in final carry
	andcc	%o1, 4, %g0			! check again for word remaining
1:	be	1f				! nope, skip this code
	andcc	%o1, 3, %o1			! check for trailing bytes
	ld	[%o0], %g2			! load it
	addcc	%g2, %o2, %o2			! add to sum
	add	%o0, 4, %o0			! advance buf ptr
	addx	%g0, %o2, %o2			! add in final carry
	andcc	%o1, 3, %g0			! check again for trailing bytes
1:	be	1f				! no trailing bytes, return
	addcc	%o1, -1, %g0			! only one byte remains?
	bne	2f				! at least two bytes more
	subcc	%o1, 2, %o1			! only two bytes more?
	b	4f				! only one byte remains
	or	%g0, %g0, %o4			! clear fake hword value
2:	lduh	[%o0], %o4			! get hword
	be	6f				! jmp if only hword remains
	add	%o0, 2, %o0			! advance buf ptr either way
	sll	%o4, 16, %o4			! create upper hword
4:	ldub	[%o0], %o5			! get final byte
	sll	%o5, 8, %o5			! put into place
	or	%o5, %o4, %o4			! coalesce with hword (if any)
6:	addcc	%o4, %o2, %o2			! add to sum
1:	retl					! get outta here
	addx	%g0, %o2, %o0			! add final carry into retval
	/* Also do alignment out of band to get better cache patterns. */
csum_partial_fix_alignment:
	lduh	[%o0 + 0x00], %g2
	andcc	%o1, 0xffffff80, %o3
	andcc	%o1, 0xffffff80, %o3
	/* The common case is to get called with a nicely aligned
	 * buffer of size 0x20.  Follow the code path for that case.
	 */
	EXPORT_SYMBOL(csum_partial)
csum_partial:			/* %o0=buf, %o1=len, %o2=sum */
	andcc	%o0, 0x7, %g0				! alignment problems?
	bne	csum_partial_fix_alignment		! yep, handle it
	sethi	%hi(cpte - 8), %g7			! prepare table jmp ptr
	andcc	%o1, 0xffffff80, %o3			! num loop iterations
cpa:	be	3f					! none to do
	andcc	%o1, 0x70, %g1				! clears carry flag too
5:	CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	addx	%g0, %o2, %o2				! sink in final carry
	subcc	%o3, 128, %o3				! detract from loop iters
	bne	5b					! more to do
	add	%o0, 128, %o0				! advance buf ptr
	andcc	%o1, 0x70, %g1				! clears carry flag too
3:	be	cpte					! none to do
	andcc	%o1, 0xf, %g0				! anything left at all?
	srl	%g1, 1, %o4				! compute offset
	sub	%g7, %g1, %g7				! adjust jmp ptr
	sub	%g7, %o4, %g7				! final jmp ptr adjust
	jmp	%g7 + %lo(cpte - 8)			! enter the table
	add	%o0, %g1, %o0				! advance buf ptr
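	/* Dispatch math for the table below: each CSUM_LASTCHUNK expands to
	 * 6 instructions (24 bytes) and sums 16 bytes of data.  %g1 holds
	 * len & 0x70, so the entry point is (cpte - 8) - (%g1/16)*24, i.e.
	 * cpte - 8 - 1.5*%g1, which is what subtracting %g1 and %g1 >> 1
	 * from the table pointer above computes.
	 */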
cptbl:	CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
	addx	%g0, %o2, %o2			! fetch final carry
	andcc	%o1, 0xf, %g0			! anything left at all?
cpte:	bne	csum_partial_end_cruft		! yep, handle it
	andcc	%o1, 8, %g0			! check how much
cpout:	retl					! get outta here
	mov	%o2, %o0			! return computed csum
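
/* Structurally, csum_partial above behaves roughly like the following C
 * (a simplified sketch assuming an 8-byte aligned buffer; the names are
 * illustrative and csum_add32 is the hypothetical helper sketched near
 * the top of this file):
 *
 *	unsigned int sketch_csum_partial(const unsigned char *buf,
 *					 int len, unsigned int sum)
 *	{
 *		const unsigned int *p = (const unsigned int *)buf;
 *		int i;
 *
 *		while (len >= 128) {			// the 5: loop
 *			for (i = 0; i < 32; i++)
 *				sum = csum_add32(sum, *p++);
 *			len -= 128;
 *		}
 *		while (len >= 16) {			// the cptbl jump table
 *			for (i = 0; i < 4; i++)
 *				sum = csum_add32(sum, *p++);
 *			len -= 16;
 *		}
 *		// remaining 0..15 bytes: csum_partial_end_cruft
 *		return sum;
 *	}
 */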
	.globl	__csum_partial_copy_start, __csum_partial_copy_end
__csum_partial_copy_start:

/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
#define EX(x,y,a,b)				\
	.section .fixup,ALLOC,EXECINSTR;	\
	.section __ex_table,ALLOC;		\
	.section __ex_table,ALLOC;		\
	.section __ex_table,ALLOC;		\
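
/* EXT(start, end, handler) below emits an "extended" exception table entry
 * covering the whole instruction range [start, end): the zero in the second
 * word marks the entry as a range entry rather than a single-instruction
 * one, and a fault anywhere inside the range is redirected to handler.
 */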
#define EXT(start,end,handler)			\
	.section __ex_table,ALLOC;		\
	.word	start, 0, end, handler;		\
	.text;					\
	.align	4
/* This aligned version executes typically in 8.5 superscalar cycles, this
 * is the best I can do.  I say 8.5 because the final add will pair with
 * the next ldd in the main unrolled loop.  Thus the pipe is always full.
 * If you change these macros (including order of instructions),
 * please check the fixup code below as well.
 */
#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
	ldd	[src + off + 0x00], t0;							\
	ldd	[src + off + 0x08], t2;							\
	addxcc	t0, sum, sum;								\
	ldd	[src + off + 0x10], t4;							\
	addxcc	t1, sum, sum;								\
	ldd	[src + off + 0x18], t6;							\
	addxcc	t2, sum, sum;								\
	std	t0, [dst + off + 0x00];							\
	addxcc	t3, sum, sum;								\
	std	t2, [dst + off + 0x08];							\
	addxcc	t4, sum, sum;								\
	std	t4, [dst + off + 0x10];							\
	addxcc	t5, sum, sum;								\
	std	t6, [dst + off + 0x18];							\
	addxcc	t6, sum, sum;								\
	addxcc	t7, sum, sum;
/* 12 superscalar cycles seems to be the limit for this case,
 * because of this we thus do all the ldd's together to get
 * Viking MXCC into streaming mode.  Ho hum...
 */
#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
	ldd	[src + off + 0x00], t0;						\
	ldd	[src + off + 0x08], t2;						\
	ldd	[src + off + 0x10], t4;						\
	ldd	[src + off + 0x18], t6;						\
	st	t0, [dst + off + 0x00];						\
	addxcc	t0, sum, sum;							\
	st	t1, [dst + off + 0x04];						\
	addxcc	t1, sum, sum;							\
	st	t2, [dst + off + 0x08];						\
	addxcc	t2, sum, sum;							\
	st	t3, [dst + off + 0x0c];						\
	addxcc	t3, sum, sum;							\
	st	t4, [dst + off + 0x10];						\
	addxcc	t4, sum, sum;							\
	st	t5, [dst + off + 0x14];						\
	addxcc	t5, sum, sum;							\
	st	t6, [dst + off + 0x18];						\
	addxcc	t6, sum, sum;							\
	st	t7, [dst + off + 0x1c];						\
	addxcc	t7, sum, sum;
/* Yuck, 6 superscalar cycles... */
#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3)	\
	ldd	[src - off - 0x08], t0;				\
	ldd	[src - off - 0x00], t2;				\
	addxcc	t0, sum, sum;					\
	st	t0, [dst - off - 0x08];				\
	addxcc	t1, sum, sum;					\
	st	t1, [dst - off - 0x04];				\
	addxcc	t2, sum, sum;					\
	st	t2, [dst - off - 0x00];				\
	addxcc	t3, sum, sum;					\
	st	t3, [dst - off + 0x04];
	/* Handle the end cruft code out of band for better cache patterns. */
	EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf)
	EX2(st %g2, [%o1 - 0x08])
	EX2(st %g3, [%o1 - 0x04])
	EX(ld [%o0 + 0x00], %g2, add %o3, 4)
	EX2(st %g2, [%o1 - 0x04])
2:	EX(lduh [%o0 + 0x00], %o4, add %o3, 2)
	EX2(sth %o4, [%o1 + 0x00])
4:	EX(ldub [%o0 + 0x00], %o5, add %g0, 1)
	EX2(stb %o5, [%o1 + 0x00])
6:	addcc	%o4, %g7, %g7
	/* Also, handle the alignment code out of band. */
	andcc	%o3, %o0, %g0	! Check %o0 only (%o1 has the same last 2 bits)
1:	andcc	%o0, 0x1, %g0
	EX(lduh [%o0 + 0x00], %g4, add %g1, 0)
	EX2(sth %g4, [%o1 + 0x00])
	andcc	%g1, 0xffffff80, %g0
	EX(ld [%o0 + 0x00], %g4, add %g1, 0)
	EX2(st %g4, [%o1 + 0x00])
	andcc	%g1, 0xffffff80, %g0
	/* Sun, you just can't beat me, you just can't.  Stop trying,
	 * give up.  I'm serious, I am going to kick the living shit
	 * out of you, game over, lights out.
	 */
	.globl	__csum_partial_copy_sparc_generic
	EXPORT_SYMBOL(__csum_partial_copy_sparc_generic)
__csum_partial_copy_sparc_generic:
					/* %o0=src, %o1=dest, %g1=len, %g7=sum */
	xor	%o0, %o1, %o4		! get changing bits
	andcc	%o4, 3, %g0		! check for mismatched alignment
	bne	ccslow			! better this than unaligned/fixups
	andcc	%o0, 7, %g0		! need to align things?
	bne	cc_dword_align		! yes, we check for short lengths there
	andcc	%g1, 0xffffff80, %g0	! can we use unrolled loop?
3:	be	3f			! nope, less than one loop remains
	andcc	%o1, 4, %g0		! dest aligned on 4 or 8 byte boundary?
	be	ccdbl + 4		! 8 byte aligned, kick ass
5:	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
10:	EXT(5b, 10b, 20f)		! note for exception handling
	sub	%g1, 128, %g1		! detract from length
	addx	%g0, %g7, %g7		! add in last carry bit
	andcc	%g1, 0xffffff80, %g0	! more to csum?
	add	%o0, 128, %o0		! advance src ptr
	bne	5b			! we did not go negative, continue looping
	add	%o1, 128, %o1		! advance dest ptr
3:	andcc	%g1, 0x70, %o2		! can use table?
ccmerge:be	ccte			! nope, go and check for end cruft
	andcc	%g1, 0xf, %o3		! get low bits of length (clears carry btw)
	srl	%o2, 1, %o4		! begin negative offset computation
	sethi	%hi(12f), %o5		! set up table ptr end
	add	%o0, %o2, %o0		! advance src ptr
	sub	%o5, %o4, %o5		! continue table calculation
	sll	%o2, 1, %g2		! constant multiplies are fun...
	sub	%o5, %g2, %o5		! some more adjustments
	jmp	%o5 + %lo(12f)		! jump into it, duff style, wheee...
	add	%o1, %o2, %o1		! advance dest ptr (carry is clear btw)
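	/* Same dispatch trick as in csum_partial: each CSUMCOPY_LASTCHUNK
	 * below is 10 instructions (40 bytes) and moves 16 bytes of data,
	 * so with %o2 = len & 0x70 the entry point is 12f - (%o2/16)*40,
	 * i.e. 12f - 2.5*%o2, built above from %o2 >> 1 plus %o2 << 1.
	 */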
cctbl:	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
12:	EXT(cctbl, 12b, 22f)		! note for exception table handling
	addx	%g0, %g7, %g7		! fold in final carry
	andcc	%o3, 0xf, %g0		! check for low bits set
ccte:	bne	cc_end_cruft		! something left, handle it out of band
	andcc	%o3, 8, %g0		! begin checks for that code
	retl				! return to caller
	mov	%g7, %o0		! give em the computed checksum
ccdbl:	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
11:	EXT(ccdbl, 11b, 21f)		! note for exception table handling
	sub	%g1, 128, %g1		! detract from length
	addx	%g0, %g7, %g7		! add in last carry bit
	andcc	%g1, 0xffffff80, %g0	! more to csum?
	add	%o0, 128, %o0		! advance src ptr
	bne	ccdbl			! we did not go negative, continue looping
	add	%o1, 128, %o1		! advance dest ptr
	b	ccmerge			! finish it off, above
	andcc	%g1, 0x70, %o2		! can use table? (clears carry btw)
	EX(ldub [%o0], %g5, add %g1, 1)
	EX(lduh [%o0], %o4, add %g1, 0)
	EX2(stb %o4, [%o1 + 1])
	EX2(stb %g3, [%o1 + 1])
	EX2(stb %g2, [%o1 + 2])
	EX2(stb %o4, [%o1 + 3])
	addx	%g5, %g0, %g5	! I am now too lazy to optimize this (question
	add	%o1, 4, %o1	! whether it is worthwhile). Maybe some day -
	subcc	%g4, 1, %g4	! with the sll/srl tricks
	EX(lduh [%o0], %o4, and %g1, 3)
	EX2(stb %o4, [%o1 + 1])
	EX(ldub [%o0], %g2, add %g0, 1)
1:	addcc	%o4, %g5, %g5
4:	addcc	%g7, %g5, %g7
__csum_partial_copy_end:
/* We do these strange calculations for the csum_*_from_user case only, ie.
 * we only bother with faults on loads... */

/* o2 = ((g2%20)&3)*8
 * o3 = g1 - (g2/20)*32 - o2 */

/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
 * o3 = g1 - (g2/16)*32 - o2 */
/* o0 += (g2/10)*16 - 0x70
 * o1 += (g2/10)*16 - 0x70
 * o2 = (g2 % 10) ? 8 : 0
 * o3 += 0x70 - (g2/10)*16 - o2 */
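
/* The divisors above appear to mirror the sizes of the copy macros:
 * CSUMCOPY_BIGCHUNK is 20 instructions per 32 bytes of data,
 * CSUMCOPY_BIGCHUNK_ALIGNED is 16 instructions per 32 bytes, and
 * CSUMCOPY_LASTCHUNK is 10 instructions per 16 bytes.  Under that reading
 * %g2 counts instructions into the faulting unrolled block, so e.g.
 * g2/20 is the number of 32-byte sub-chunks fully handled before the
 * faulting load and (g2%20)&3 selects which of a chunk's four leading
 * ldd's faulted, each earlier ldd having already covered 8 bytes.
 */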
 * %o3 is # bytes to zero out
 * %o4 is faulting address
 * %o5 is %pc where fault occurred */

 * %o2 is # of bytes to copy from src to dst
 * %o3 is # bytes to zero out
 * %o4 is faulting address
 * %o5 is %pc where fault occurred */
	add	%g0, -EFAULT, %i5
	ld	[%sp + 168], %o2	! struct_ptr of parent
	.section __ex_table,#alloc