/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_XOR_32_H
#define _ASM_X86_XOR_32_H
/*
 * Optimized RAID-5 checksumming functions for MMX.
 *
 * High-speed RAID5 checksumming functions utilizing MMX instructions.
 * Copyright (C) 1998 Ingo Molnar.
 */
#define LD(x, y)	" movq 8*("#x")(%1), %%mm"#y" ;\n"
#define ST(x, y)	" movq %%mm"#y", 8*("#x")(%1) ;\n"
#define XO1(x, y)	" pxor 8*("#x")(%2), %%mm"#y" ;\n"
#define XO2(x, y)	" pxor 8*("#x")(%3), %%mm"#y" ;\n"
#define XO3(x, y)	" pxor 8*("#x")(%4), %%mm"#y" ;\n"
#define XO4(x, y)	" pxor 8*("#x")(%5), %%mm"#y" ;\n"
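/*
 * For illustration: LD(0, 0) stringizes to " movq 8*(0)(%1), %%mm0 ;\n",
 * i.e. load the first 8-byte word of the destination buffer (%1) into
 * MMX register %mm0.  XO1..XO4 xor in the word at the same offset from
 * source buffers %2..%5, and ST stores the result back to %1.
 */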
#include <asm/fpu/api.h>
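/*
 * Touching MMX state in kernel code must be bracketed by
 * kernel_fpu_begin()/kernel_fpu_end() (declared in <asm/fpu/api.h>) so
 * that user-space FPU/MMX register state is preserved around the asm.
 */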
static void
xor_pII_mmx_2(unsigned long bytes, unsigned long * __restrict p1,
	      const unsigned long * __restrict p2)
{
	unsigned long lines = bytes >> 7;	/* 128 bytes per unrolled pass */
static void
xor_pII_mmx_3(unsigned long bytes, unsigned long * __restrict p1,
	      const unsigned long * __restrict p2,
	      const unsigned long * __restrict p3)
{
	unsigned long lines = bytes >> 7;	/* 128 bytes per unrolled pass */
110 "+r" (p1), "+r" (p2), "+r" (p3)
static void
xor_pII_mmx_4(unsigned long bytes, unsigned long * __restrict p1,
	      const unsigned long * __restrict p2,
	      const unsigned long * __restrict p3,
	      const unsigned long * __restrict p4)
{
	unsigned long lines = bytes >> 7;	/* 128 bytes per unrolled pass */
166 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
static void
xor_pII_mmx_5(unsigned long bytes, unsigned long * __restrict p1,
	      const unsigned long * __restrict p2,
	      const unsigned long * __restrict p3,
	      const unsigned long * __restrict p4,
	      const unsigned long * __restrict p5)
{
	unsigned long lines = bytes >> 7;	/* 128 bytes per unrolled pass */
	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	asm("" : "+r" (p4), "+r" (p5));
237 "+r" (p1), "+r" (p2), "+r" (p3)
	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	asm("" : "=r" (p4), "=r" (p5));
static void
xor_p5_mmx_2(unsigned long bytes, unsigned long * __restrict p1,
	     const unsigned long * __restrict p2)
{
	unsigned long lines = bytes >> 6;	/* 64 bytes per unrolled pass */
268 " movq (%1), %%mm0 ;\n"
269 " movq 8(%1), %%mm1 ;\n"
270 " pxor (%2), %%mm0 ;\n"
271 " movq 16(%1), %%mm2 ;\n"
272 " movq %%mm0, (%1) ;\n"
273 " pxor 8(%2), %%mm1 ;\n"
274 " movq 24(%1), %%mm3 ;\n"
275 " movq %%mm1, 8(%1) ;\n"
276 " pxor 16(%2), %%mm2 ;\n"
277 " movq 32(%1), %%mm4 ;\n"
278 " movq %%mm2, 16(%1) ;\n"
279 " pxor 24(%2), %%mm3 ;\n"
280 " movq 40(%1), %%mm5 ;\n"
281 " movq %%mm3, 24(%1) ;\n"
282 " pxor 32(%2), %%mm4 ;\n"
283 " movq 48(%1), %%mm6 ;\n"
284 " movq %%mm4, 32(%1) ;\n"
285 " pxor 40(%2), %%mm5 ;\n"
286 " movq 56(%1), %%mm7 ;\n"
287 " movq %%mm5, 40(%1) ;\n"
288 " pxor 48(%2), %%mm6 ;\n"
289 " pxor 56(%2), %%mm7 ;\n"
290 " movq %%mm6, 48(%1) ;\n"
291 " movq %%mm7, 56(%1) ;\n"
static void
xor_p5_mmx_3(unsigned long bytes, unsigned long * __restrict p1,
	     const unsigned long * __restrict p2,
	     const unsigned long * __restrict p3)
{
	unsigned long lines = bytes >> 6;	/* 64 bytes per unrolled pass */
315 " .align 32,0x90 ;\n"
317 " movq (%1), %%mm0 ;\n"
318 " movq 8(%1), %%mm1 ;\n"
319 " pxor (%2), %%mm0 ;\n"
320 " movq 16(%1), %%mm2 ;\n"
321 " pxor 8(%2), %%mm1 ;\n"
322 " pxor (%3), %%mm0 ;\n"
323 " pxor 16(%2), %%mm2 ;\n"
324 " movq %%mm0, (%1) ;\n"
325 " pxor 8(%3), %%mm1 ;\n"
326 " pxor 16(%3), %%mm2 ;\n"
327 " movq 24(%1), %%mm3 ;\n"
328 " movq %%mm1, 8(%1) ;\n"
329 " movq 32(%1), %%mm4 ;\n"
330 " movq 40(%1), %%mm5 ;\n"
331 " pxor 24(%2), %%mm3 ;\n"
332 " movq %%mm2, 16(%1) ;\n"
333 " pxor 32(%2), %%mm4 ;\n"
334 " pxor 24(%3), %%mm3 ;\n"
335 " pxor 40(%2), %%mm5 ;\n"
336 " movq %%mm3, 24(%1) ;\n"
337 " pxor 32(%3), %%mm4 ;\n"
338 " pxor 40(%3), %%mm5 ;\n"
339 " movq 48(%1), %%mm6 ;\n"
340 " movq %%mm4, 32(%1) ;\n"
341 " movq 56(%1), %%mm7 ;\n"
342 " pxor 48(%2), %%mm6 ;\n"
343 " movq %%mm5, 40(%1) ;\n"
344 " pxor 56(%2), %%mm7 ;\n"
345 " pxor 48(%3), %%mm6 ;\n"
346 " pxor 56(%3), %%mm7 ;\n"
347 " movq %%mm6, 48(%1) ;\n"
348 " movq %%mm7, 56(%1) ;\n"
356 "+r" (p1), "+r" (p2), "+r" (p3)
static void
xor_p5_mmx_4(unsigned long bytes, unsigned long * __restrict p1,
	     const unsigned long * __restrict p2,
	     const unsigned long * __restrict p3,
	     const unsigned long * __restrict p4)
{
	unsigned long lines = bytes >> 6;	/* 64 bytes per unrolled pass */
374 " .align 32,0x90 ;\n"
376 " movq (%1), %%mm0 ;\n"
377 " movq 8(%1), %%mm1 ;\n"
378 " pxor (%2), %%mm0 ;\n"
379 " movq 16(%1), %%mm2 ;\n"
380 " pxor 8(%2), %%mm1 ;\n"
381 " pxor (%3), %%mm0 ;\n"
382 " pxor 16(%2), %%mm2 ;\n"
383 " pxor 8(%3), %%mm1 ;\n"
384 " pxor (%4), %%mm0 ;\n"
385 " movq 24(%1), %%mm3 ;\n"
386 " pxor 16(%3), %%mm2 ;\n"
387 " pxor 8(%4), %%mm1 ;\n"
388 " movq %%mm0, (%1) ;\n"
389 " movq 32(%1), %%mm4 ;\n"
390 " pxor 24(%2), %%mm3 ;\n"
391 " pxor 16(%4), %%mm2 ;\n"
392 " movq %%mm1, 8(%1) ;\n"
393 " movq 40(%1), %%mm5 ;\n"
394 " pxor 32(%2), %%mm4 ;\n"
395 " pxor 24(%3), %%mm3 ;\n"
396 " movq %%mm2, 16(%1) ;\n"
397 " pxor 40(%2), %%mm5 ;\n"
398 " pxor 32(%3), %%mm4 ;\n"
399 " pxor 24(%4), %%mm3 ;\n"
400 " movq %%mm3, 24(%1) ;\n"
401 " movq 56(%1), %%mm7 ;\n"
402 " movq 48(%1), %%mm6 ;\n"
403 " pxor 40(%3), %%mm5 ;\n"
404 " pxor 32(%4), %%mm4 ;\n"
405 " pxor 48(%2), %%mm6 ;\n"
406 " movq %%mm4, 32(%1) ;\n"
407 " pxor 56(%2), %%mm7 ;\n"
408 " pxor 40(%4), %%mm5 ;\n"
409 " pxor 48(%3), %%mm6 ;\n"
410 " pxor 56(%3), %%mm7 ;\n"
411 " movq %%mm5, 40(%1) ;\n"
412 " pxor 48(%4), %%mm6 ;\n"
413 " pxor 56(%4), %%mm7 ;\n"
414 " movq %%mm6, 48(%1) ;\n"
415 " movq %%mm7, 56(%1) ;\n"
424 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
static void
xor_p5_mmx_5(unsigned long bytes, unsigned long * __restrict p1,
	     const unsigned long * __restrict p2,
	     const unsigned long * __restrict p3,
	     const unsigned long * __restrict p4,
	     const unsigned long * __restrict p5)
{
	unsigned long lines = bytes >> 6;	/* 64 bytes per unrolled pass */
	/* Make sure GCC forgets anything it knows about p4 or p5,
	   such that it won't pass to the asm volatile below a
	   register that is shared with any other variable.  That's
	   because we modify p4 and p5 there, but we can't mark them
	   as read/write, otherwise we'd overflow the 10-asm-operands
	   limit of GCC < 3.1.  */
	asm("" : "+r" (p4), "+r" (p5));
451 " .align 32,0x90 ;\n"
453 " movq (%1), %%mm0 ;\n"
454 " movq 8(%1), %%mm1 ;\n"
455 " pxor (%2), %%mm0 ;\n"
456 " pxor 8(%2), %%mm1 ;\n"
457 " movq 16(%1), %%mm2 ;\n"
458 " pxor (%3), %%mm0 ;\n"
459 " pxor 8(%3), %%mm1 ;\n"
460 " pxor 16(%2), %%mm2 ;\n"
461 " pxor (%4), %%mm0 ;\n"
462 " pxor 8(%4), %%mm1 ;\n"
463 " pxor 16(%3), %%mm2 ;\n"
464 " movq 24(%1), %%mm3 ;\n"
465 " pxor (%5), %%mm0 ;\n"
466 " pxor 8(%5), %%mm1 ;\n"
467 " movq %%mm0, (%1) ;\n"
468 " pxor 16(%4), %%mm2 ;\n"
469 " pxor 24(%2), %%mm3 ;\n"
470 " movq %%mm1, 8(%1) ;\n"
471 " pxor 16(%5), %%mm2 ;\n"
472 " pxor 24(%3), %%mm3 ;\n"
473 " movq 32(%1), %%mm4 ;\n"
474 " movq %%mm2, 16(%1) ;\n"
475 " pxor 24(%4), %%mm3 ;\n"
476 " pxor 32(%2), %%mm4 ;\n"
477 " movq 40(%1), %%mm5 ;\n"
478 " pxor 24(%5), %%mm3 ;\n"
479 " pxor 32(%3), %%mm4 ;\n"
480 " pxor 40(%2), %%mm5 ;\n"
481 " movq %%mm3, 24(%1) ;\n"
482 " pxor 32(%4), %%mm4 ;\n"
483 " pxor 40(%3), %%mm5 ;\n"
484 " movq 48(%1), %%mm6 ;\n"
485 " movq 56(%1), %%mm7 ;\n"
486 " pxor 32(%5), %%mm4 ;\n"
487 " pxor 40(%4), %%mm5 ;\n"
488 " pxor 48(%2), %%mm6 ;\n"
489 " pxor 56(%2), %%mm7 ;\n"
490 " movq %%mm4, 32(%1) ;\n"
491 " pxor 48(%3), %%mm6 ;\n"
492 " pxor 56(%3), %%mm7 ;\n"
493 " pxor 40(%5), %%mm5 ;\n"
494 " pxor 48(%4), %%mm6 ;\n"
495 " pxor 56(%4), %%mm7 ;\n"
496 " movq %%mm5, 40(%1) ;\n"
497 " pxor 48(%5), %%mm6 ;\n"
498 " pxor 56(%5), %%mm7 ;\n"
499 " movq %%mm6, 48(%1) ;\n"
500 " movq %%mm7, 56(%1) ;\n"
510 "+r" (p1), "+r" (p2), "+r" (p3)
	/* p4 and p5 were modified, and now the variables are dead.
	   Clobber them just to be sure nobody does something stupid
	   like assuming they have some legal value.  */
	asm("" : "=r" (p4), "=r" (p5));
static struct xor_block_template xor_block_pII_mmx = {
	.name = "pII_mmx",
	.do_2 = xor_pII_mmx_2,
	.do_3 = xor_pII_mmx_3,
	.do_4 = xor_pII_mmx_4,
	.do_5 = xor_pII_mmx_5,
};
static struct xor_block_template xor_block_p5_mmx = {
	.name = "p5_mmx",
	.do_2 = xor_p5_mmx_2,
	.do_3 = xor_p5_mmx_3,
	.do_4 = xor_p5_mmx_4,
	.do_5 = xor_p5_mmx_5,
};
static struct xor_block_template xor_block_pIII_sse = {
	.name = "pIII_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};
/* Also try the AVX routines */
#include <asm/xor_avx.h>

/* Also try the generic routines. */
#include <asm-generic/xor.h>
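/*
 * <asm-generic/xor.h> provides the plain integer-register fallback
 * templates (xor_block_8regs, xor_block_32regs and their prefetching
 * _p variants) benchmarked below when neither SSE nor MMX is available.
 */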
/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load into the L1 only depending on how the cpu
   deals with a load to a line that is being prefetched.  */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES				\
do {							\
	AVX_XOR_SPEED;					\
	if (boot_cpu_has(X86_FEATURE_XMM)) {		\
		xor_speed(&xor_block_pIII_sse);		\
		xor_speed(&xor_block_sse_pf64);		\
	} else if (boot_cpu_has(X86_FEATURE_MMX)) {	\
		xor_speed(&xor_block_pII_mmx);		\
		xor_speed(&xor_block_p5_mmx);		\
	} else {					\
		xor_speed(&xor_block_8regs);		\
		xor_speed(&xor_block_8regs_p);		\
		xor_speed(&xor_block_32regs);		\
		xor_speed(&xor_block_32regs_p);		\
	}						\
} while (0)
#endif /* _ASM_X86_XOR_32_H */