/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
 *
 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */
#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/export.h>
#define GLOBAL_SPARE	g7
#else
#define GLOBAL_SPARE	g5
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
         clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif
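
/* Plain, non-faulting defaults: the EX_LD_FP/EX_ST_FP wrappers below simply
 * emit the raw load/store and ignore the fault-label argument, LOAD_BLK and
 * STORE_BLK use the 64-byte block ASI, and FUNC_NAME is the exported symbol.
 */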
#define EX_LD_FP(x,y)	x
#define EX_ST_FP(x,y)	x
#define LOAD(type,addr,dest)	type [addr], dest
#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
#define STORE(type,src,addr)	type src, [addr]
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#define FUNC_NAME	memcpy
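
/* FREG_FROB runs a nine-register window of source data through faligndata,
 * leaving 64 bytes of realigned source data in %f48-%f62, ready for one
 * block store to the 64-byte-aligned destination.
 */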
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9) \
        faligndata %f1, %f2, %f48; \
        faligndata %f2, %f3, %f50; \
        faligndata %f3, %f4, %f52; \
        faligndata %f4, %f5, %f54; \
        faligndata %f5, %f6, %f56; \
        faligndata %f6, %f7, %f58; \
        faligndata %f7, %f8, %f60; \
        faligndata %f8, %f9, %f62;
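
/* One iteration of the main loop: block-load the next 64 source bytes into
 * fdest, block-store the 64 realigned bytes waiting in fsrc, advance both
 * pointers, and drop the remaining block count in %GLOBAL_SPARE; when it
 * reaches zero, control leaves the loop through jmptgt.
 */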
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \
        EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \
        EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
        add %src, 0x40, %src; \
        subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \
        be,pn %xcc, jmptgt; \
         add %dest, 0x40, %dest;
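
/* The three loop-chunk variants differ only in which register bank (%f0,
 * %f16 or %f32) receives the next block load; the FREG_FROB output in
 * %f48-%f62 is always what gets stored.
 */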
#define LOOP_CHUNK1(src, dest, branch_dest) \
        MAIN_LOOP_CHUNK(src, dest, f0,  f48, branch_dest)
#define LOOP_CHUNK2(src, dest, branch_dest) \
        MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest)
#define LOOP_CHUNK3(src, dest, branch_dest) \
        MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest)
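
/* STORE_SYNC emits the pending block store plus a membar #Sync so the data
 * is consumed from the FP registers before they are refilled; STORE_JUMP
 * emits the final block store of a pass and branches to the per-alignment
 * tail code.
 */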
#define DO_SYNC membar #Sync;
#define STORE_SYNC(dest, fsrc) \
        EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \
        add %dest, 0x40, %dest; \
        DO_SYNC
#define STORE_JUMP(dest, fsrc, target) \
        EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \
        add %dest, 0x40, %dest; \
        ba,pt %xcc, target; \
         nop;
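
/* Tail helpers: FINISH_VISCHUNK stores one more realigned 8-byte piece
 * while at least 8 bytes remain in %g3; the UNEVEN variants instead move
 * the last loaded register into %f0 and fall into the 93f/95f copy-out
 * loops.
 */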
#define FINISH_VISCHUNK(dest, f0, f1) \
        subcc %g3, 8, %g3; \
        bl,pn %xcc, 95f; \
         faligndata %f0, %f1, %f48; \
        EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \
        add %dest, 8, %dest;

#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
        subcc %g3, 8, %g3; \
        bl,pn %xcc, 95f; \
         fsrc2 %f0, %f1;

#define UNEVEN_VISCHUNK(dest, f0, f1) \
        UNEVEN_VISCHUNK_LAST(dest, f0, f1) \
        ba,a,pt %xcc, 93f;
        .register %g2,#scratch
        .register %g3,#scratch

#define EX_RETVAL(x)	x
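
/* EX_RETVAL() is an identity here.  The U1_* stubs that follow are the
 * fault-exit paths named in the EX_LD/EX_ST annotations; each one
 * reconstructs how many bytes were still left to copy and returns that
 * count in %o0 (plain memcpy never branches to them).
 */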
        add %GLOBAL_SPARE, %g3, %o0
        add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
        add %GLOBAL_SPARE, %g3, %o0
        add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE
        add %GLOBAL_SPARE, %g3, %o0
        add %GLOBAL_SPARE, %o2, %o0
        add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
        add %GLOBAL_SPARE, 0x8, %o0
        add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE
        add %GLOBAL_SPARE, 0x10, %o0
ENTRY(U1_gs_0_o2_adj)
        and %o2, 7, %o2
        retl
         add %GLOBAL_SPARE, %o2, %o0
ENDPROC(U1_gs_0_o2_adj)
ENTRY(U1_gs_8_o2_adj)
        and %o2, 7, %o2
        add %GLOBAL_SPARE, 8, %GLOBAL_SPARE
        retl
         add %GLOBAL_SPARE, %o2, %o0
ENDPROC(U1_gs_8_o2_adj)
        .type FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
        /* Clobbers o5/g1/g2/g3/g7/icc/xcc. */

        /* Is 'dst' already aligned on a 64-byte boundary? */

        /* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
         * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
         * subtract this from 'len'.
         */
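        /* For example, with dst & 0x3f == 0x08 this gives %g2 = 0x38, so 56
         * bytes are copied by the small-copy loops below before the 64-byte
         * block loop takes over.
         */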
        sub %o0, %o1, %GLOBAL_SPARE
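        /* %GLOBAL_SPARE now holds (dst - src), so the loops below can form a
         * destination address as src + %GLOBAL_SPARE while advancing only
         * the source pointer.
         */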
1:      subcc %g1, 0x1, %g1
        EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp)
        EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp)

        add %o1, %GLOBAL_SPARE, %o0
        alignaddr %o1, %g0, %o1

        EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp)
1:      EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp)
        faligndata %f4, %f6, %f0
        EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)

        EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp)
        faligndata %f6, %f4, %f0
        EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp)
        /* Destination is 64-byte aligned. */
        membar #LoadStore | #StoreStore | #StoreLoad
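
        /* Carve the copy up: %GLOBAL_SPARE gets the byte count the 64-byte
         * block loop will handle, %g3 the tail that is left for the
         * VISCHUNK/byte code after it.
         */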
        subcc %o2, 0x40, %GLOBAL_SPARE
        andncc %GLOBAL_SPARE, (0x40 - 1), %GLOBAL_SPARE
        sub %o2, %GLOBAL_SPARE, %g3
        andn %o1, (0x40 - 1), %o1
        sub %o2, %GLOBAL_SPARE, %o2
        add %g1, %GLOBAL_SPARE, %g1

        EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp)
        EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp)
        sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
        EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp)
        /* There are 8 instances of the unrolled loop,
         * one for each possible alignment of the
         * source buffer.  Each loop instance is 452
         * bytes.
         */
        add %o3, %lo(1f - 1b), %o3
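
        /* Each instance below differs only in which FP register starts the
         * faligndata chain (f0 for source offset 0, f2 for offset 8, and so
         * on); the %o3 target computed above picks the matching one.
         */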
1:      FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
        LOOP_CHUNK1(o1, o0, 1f)
        FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
        LOOP_CHUNK2(o1, o0, 2f)
        FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
        LOOP_CHUNK3(o1, o0, 3f)
        ba,pt %xcc, 1b+4
         faligndata %f0, %f2, %f48
1:      FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
        STORE_SYNC(o0, f48)
        FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
        STORE_JUMP(o0, f48, 40f)
2:      FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
        STORE_SYNC(o0, f48)
        FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
        STORE_JUMP(o0, f48, 48f)
3:      FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
        STORE_SYNC(o0, f48)
        FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
        STORE_JUMP(o0, f48, 56f)
1:      FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
        LOOP_CHUNK1(o1, o0, 1f)
        FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
        LOOP_CHUNK2(o1, o0, 2f)
        FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
        LOOP_CHUNK3(o1, o0, 3f)
        ba,pt %xcc, 1b+4
         faligndata %f2, %f4, %f48
1:      FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
        STORE_SYNC(o0, f48)
        FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
        STORE_JUMP(o0, f48, 41f)
2:      FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
        STORE_SYNC(o0, f48)
        FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
        STORE_JUMP(o0, f48, 49f)
3:      FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
        STORE_SYNC(o0, f48)
        FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
        STORE_JUMP(o0, f48, 57f)
1:      FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
        LOOP_CHUNK1(o1, o0, 1f)
        FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
        LOOP_CHUNK2(o1, o0, 2f)
        FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
        LOOP_CHUNK3(o1, o0, 3f)
        ba,pt %xcc, 1b+4
         faligndata %f4, %f6, %f48
1:      FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
        STORE_SYNC(o0, f48)
        FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
        STORE_JUMP(o0, f48, 42f)
2:      FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
        STORE_SYNC(o0, f48)
        FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
        STORE_JUMP(o0, f48, 50f)
3:      FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
        STORE_SYNC(o0, f48)
        FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
        STORE_JUMP(o0, f48, 58f)
1:      FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
        LOOP_CHUNK1(o1, o0, 1f)
        FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
        LOOP_CHUNK2(o1, o0, 2f)
        FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
        LOOP_CHUNK3(o1, o0, 3f)
        ba,pt %xcc, 1b+4
         faligndata %f6, %f8, %f48
1:      FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
        STORE_SYNC(o0, f48)
        FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
        STORE_JUMP(o0, f48, 43f)
2:      FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
        STORE_SYNC(o0, f48)
        FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
        STORE_JUMP(o0, f48, 51f)
3:      FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
        STORE_SYNC(o0, f48)
        FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
        STORE_JUMP(o0, f48, 59f)
1:      FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
        LOOP_CHUNK1(o1, o0, 1f)
        FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
        LOOP_CHUNK2(o1, o0, 2f)
        FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
        LOOP_CHUNK3(o1, o0, 3f)
        ba,pt %xcc, 1b+4
         faligndata %f8, %f10, %f48
1:      FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
        STORE_SYNC(o0, f48)
        FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
        STORE_JUMP(o0, f48, 44f)
2:      FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
        STORE_SYNC(o0, f48)
        FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
        STORE_JUMP(o0, f48, 52f)
3:      FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
        STORE_SYNC(o0, f48)
        FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
        STORE_JUMP(o0, f48, 60f)
1:      FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
        LOOP_CHUNK1(o1, o0, 1f)
        FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
        LOOP_CHUNK2(o1, o0, 2f)
        FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
        LOOP_CHUNK3(o1, o0, 3f)
        ba,pt %xcc, 1b+4
         faligndata %f10, %f12, %f48
1:      FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
        STORE_SYNC(o0, f48)
        FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
        STORE_JUMP(o0, f48, 45f)
2:      FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
        STORE_SYNC(o0, f48)
        FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
        STORE_JUMP(o0, f48, 53f)
3:      FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
        STORE_SYNC(o0, f48)
        FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
        STORE_JUMP(o0, f48, 61f)
1:      FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
        LOOP_CHUNK1(o1, o0, 1f)
        FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
        LOOP_CHUNK2(o1, o0, 2f)
        FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
        LOOP_CHUNK3(o1, o0, 3f)
        ba,pt %xcc, 1b+4
         faligndata %f12, %f14, %f48
1:      FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
        STORE_SYNC(o0, f48)
        FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
        STORE_JUMP(o0, f48, 46f)
2:      FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
        STORE_SYNC(o0, f48)
        FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
        STORE_JUMP(o0, f48, 54f)
3:      FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
        STORE_SYNC(o0, f48)
        FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
        STORE_JUMP(o0, f48, 62f)
1:      FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
        LOOP_CHUNK1(o1, o0, 1f)
        FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
        LOOP_CHUNK2(o1, o0, 2f)
        FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
        LOOP_CHUNK3(o1, o0, 3f)
        ba,pt %xcc, 1b+4
         faligndata %f14, %f16, %f48
1:      FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
        STORE_SYNC(o0, f48)
        FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
        STORE_JUMP(o0, f48, 47f)
2:      FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
        STORE_SYNC(o0, f48)
        FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
        STORE_JUMP(o0, f48, 55f)
3:      FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
        STORE_SYNC(o0, f48)
        FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
        STORE_JUMP(o0, f48, 63f)
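
        /* Every STORE_JUMP above lands on one of the 40-63 labels below,
         * which flush the realigned data still held in FP registers; 47, 55
         * and 63 are the wrap-around cases that fall back into the 93f/95f
         * loops.
         */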
40:     FINISH_VISCHUNK(o0, f0, f2)
41:     FINISH_VISCHUNK(o0, f2, f4)
42:     FINISH_VISCHUNK(o0, f4, f6)
43:     FINISH_VISCHUNK(o0, f6, f8)
44:     FINISH_VISCHUNK(o0, f8, f10)
45:     FINISH_VISCHUNK(o0, f10, f12)
46:     FINISH_VISCHUNK(o0, f12, f14)
47:     UNEVEN_VISCHUNK(o0, f14, f0)
48:     FINISH_VISCHUNK(o0, f16, f18)
49:     FINISH_VISCHUNK(o0, f18, f20)
50:     FINISH_VISCHUNK(o0, f20, f22)
51:     FINISH_VISCHUNK(o0, f22, f24)
52:     FINISH_VISCHUNK(o0, f24, f26)
53:     FINISH_VISCHUNK(o0, f26, f28)
54:     FINISH_VISCHUNK(o0, f28, f30)
55:     UNEVEN_VISCHUNK(o0, f30, f0)
56:     FINISH_VISCHUNK(o0, f32, f34)
57:     FINISH_VISCHUNK(o0, f34, f36)
58:     FINISH_VISCHUNK(o0, f36, f38)
59:     FINISH_VISCHUNK(o0, f38, f40)
60:     FINISH_VISCHUNK(o0, f40, f42)
61:     FINISH_VISCHUNK(o0, f42, f44)
62:     FINISH_VISCHUNK(o0, f44, f46)
63:     UNEVEN_VISCHUNK_LAST(o0, f46, f0)
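
        /* The 93f loop below copies the remaining 8-byte-aligned portion of
         * the tail (counted in %g3) with faligndata, and the byte loop after
         * it finishes whatever is left in %o2 before the saved destination
         * pointer in %o4 is returned.
         */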
93:     EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp)
        faligndata %f0, %f2, %f8
        EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)

        EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp)
        faligndata %f2, %f0, %f8
        EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp)

1:      EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp)
        EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp)

2:      membar #StoreLoad | #StoreStore
        mov EX_RETVAL(%o4), %o0
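
        /* The paths below avoid the VIS block loop entirely: 70f handles
         * medium copies (16 < len <= 5*64) with 16-byte and word-sized
         * moves, and 80f/90f handle short copies of at most 16 bytes.
         */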
70:     /* 16 < len <= (5 * 64) */

72:     andn %o2, 0xf, %GLOBAL_SPARE
1:      EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0)
        EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0)
        subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE
        EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10)
        EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8)

73:     andcc %o2, 0x8, %g0
        EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0)
        EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8)
1:      andcc %o2, 0x4, %g0
        EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0)
        EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4)

75:     andcc %o0, 0x7, %g1
1:      EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0)
        EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1)

        EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0)
        andn %o2, 0x7, %GLOBAL_SPARE
1:      EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj)
        subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE
        EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj)

80:     /* 0 < len <= 16 */

1:      EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0)
        EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4)
        mov EX_RETVAL(%o4), %o0

90:     EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0)
        EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1)
        mov EX_RETVAL(%o4), %o0

        .size FUNC_NAME, .-FUNC_NAME
EXPORT_SYMBOL(FUNC_NAME)