# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Permission to use under GPL terms is granted.
# ====================================================================

# SHA256 block procedure for ARMv4. May 2007.
# Performance is ~2x better than gcc 3.4 generated code and in
# "absolute" terms is ~2250 cycles per 64-byte block or ~35 cycles
# per byte [on single-issue Xscale PXA250 core].
# Rescheduling for dual-issue pipeline resulted in 22% improvement on
# Cortex A8 core and ~20 cycles per processed byte.

# Profiler-assisted and platform-specific optimization resulted in 16%
# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
# Add NEON implementation. On Cortex A8 it was measured to process one
# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
# code (meaning that the latter performs sub-optimally; nothing was
# done about it).
# Add ARMv8 code path performing at 2.0 cpb on Apple A7.

while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
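# The argument scan above shifts through @ARGV until it finds something
# that looks like a file name (e.g. "sha256-armv4.S"); everything the
# script generates is then written to it through the redirected STDOUT.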
@V=($A,$B,$C,$D,$E,$F,$G,$H);
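# The eight working variables live in @V; after every round the tuple
# is rotated with unshift(@V,pop(@V)), so a single round body serves
# all rounds without moving values between registers.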
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___ if ($i<16);
	@ ldr	$t1,[$inp],#4			@ $i
	str	$inp,[sp,#17*4]			@ make room for $t4
	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
	@ ldrb	$t1,[$inp,#3]			@ $i
	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
	orr	$t1,$t1,$t0,lsl#16
	str	$inp,[sp,#17*4]			@ make room for $t4
	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
	orr	$t1,$t1,$t2,lsl#24
	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
	ldr	$t2,[$Ktbl],#4			@ *K256++
	add	$h,$h,$t1			@ h+=X[i]
	str	$t1,[sp,#`$i%16`*4]
	add	$h,$h,$t0,ror#$Sigma1[0]	@ h+=Sigma1(e)
	add	$h,$h,$t2			@ h+=K256[i]
	eor	$t1,$t1,$g			@ Ch(e,f,g)
	eor	$t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
	add	$h,$h,$t1			@ h+=Ch(e,f,g)
	and	$t2,$t2,#0xff
	cmp	$t2,#0xf2			@ done? (0xf2 = low byte of K256[63])
	ldr	$t1,[$inp],#4			@ prefetch
	eor	$t2,$a,$b			@ a^b, b^c in next round
	ldr	$t1,[sp,#`($i+2)%16`*4]		@ from future BODY_16_xx
	eor	$t2,$a,$b			@ a^b, b^c in next round
	ldr	$t4,[sp,#`($i+15)%16`*4]	@ from future BODY_16_xx
	eor	$t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]`	@ Sigma0(a)
	and	$t3,$t3,$t2			@ (b^c)&=(a^b)
	eor	$t3,$t3,$b			@ Maj(a,b,c)
	add	$h,$h,$t0,ror#$Sigma0[0]	@ h+=Sigma0(a)
	@ add	$h,$h,$t3			@ h+=Maj(a,b,c)
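# Each BODY_00_15 instance implements one SHA-256 round: h accumulates
#
#	T1 = h + Sigma1(e) + Ch(e,f,g) + K256[i] + X[i]
#
# after which d += T1 and T1 += Sigma0(a) + Maj(a,b,c); the working
# variables are rotated in Perl, not moved between registers. Ch is
# computed as ((f^g)&e)^g and Maj as ((a^b)&(b^c))^b, and the Maj add
# of round i is the "from the past" term deferred into round i+1 to
# shorten the critical path.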
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

	@ ldr	$t1,[sp,#`($i+1)%16`*4]		@ $i
	@ ldr	$t4,[sp,#`($i+14)%16`*4]
	mov	$t0,$t1,ror#$sigma0[0]
	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
	mov	$t2,$t4,ror#$sigma1[0]
	eor	$t0,$t0,$t1,ror#$sigma0[1]
	eor	$t2,$t2,$t4,ror#$sigma1[1]
	eor	$t0,$t0,$t1,lsr#$sigma0[2]	@ sigma0(X[i+1])
	ldr	$t1,[sp,#`($i+0)%16`*4]
	eor	$t2,$t2,$t4,lsr#$sigma1[2]	@ sigma1(X[i+14])
	ldr	$t4,[sp,#`($i+9)%16`*4]
	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`	@ from BODY_00_15
	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
	add	$t1,$t1,$t4			@ X[i]
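@ In schedule terms the block above computes
@
@	X[i] = sigma1(X[i-2]) + X[i-7] + sigma0(X[i-15]) + X[i-16]
@
@ with sigma0(x) = ROTR(x,7)^ROTR(x,18)^SHR(x,3) and
@ sigma1(x) = ROTR(x,17)^ROTR(x,19)^SHR(x,10), then falls through
@ to the same round logic as BODY_00_15.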
#ifndef __KERNEL__
# include "arm_arch.h"
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
#endif
.word	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.word	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.word	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.word	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.word	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.word	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.word	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.word	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.word	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.word	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.word	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.word	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.word	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.word	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.word	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
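@ K256[0..63] above are the standard SHA-256 round constants: the
@ first 32 bits of the fractional parts of the cube roots of the
@ first 64 primes (FIPS 180-4).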
.size	K256,.-K256
.word	0				@ terminator
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-sha256_block_data_order
#endif

.global	sha256_block_data_order
.type	sha256_block_data_order,%function
sha256_block_data_order:
.Lsha256_block_data_order:
#if __ARM_ARCH__<7
	sub	r3,pc,#8		@ sha256_block_data_order
#else
	adr	r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	ldr	r12,.LOPENSSL_armcap
	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
	tst	r12,#ARMV8_SHA256
	bne	.LARMv8
	tst	r12,#ARMV7_NEON
	bne	.LNEON
#endif
	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
	stmdb	sp!,{$ctx,$inp,$len,r4-r11,lr}
	ldmia	$ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
	sub	$Ktbl,r3,#256+32	@ K256
	sub	sp,sp,#16*4		@ alloca(X[16])
	eor	$t3,$B,$C		@ magic
for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
$code.=".Lrounds_16_xx:\n";
for (;$i<32;$i++)	{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
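# Rounds 0..15 are emitted straight-line; the bodies generated for
# $i=16..31 are executed three times via the .Lrounds_16_xx loop to
# cover rounds 16..63, the schedule indices repeating modulo 16.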
	ite	eq			@ Thumb2 thing, sanity check in ARM
	ldreq	$t3,[sp,#16*4]		@ pull ctx
	add	$A,$A,$t2		@ h+=Maj(a,b,c) from the past
	ldr	$inp,[sp,#17*4]		@ pull inp
	ldr	$t2,[sp,#18*4]		@ pull inp+len
	stmia	$t3,{$A,$B,$C,$D,$E,$F,$G,$H}
	sub	$Ktbl,$Ktbl,#256	@ rewind Ktbl
	add	sp,sp,#`16+3`*4		@ destroy frame
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	sha256_block_data_order,.-sha256_block_data_order
######################################################################

my @X=map("q$_",(0..3));
my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");

sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
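# Dlo/Dhi map a NEON quad register name to its aliased doubleword
# halves: qN overlays d(2N) (low) and d(2N+1) (high), so Dlo("q1")
# is "d2" and Dhi("q1") is "d3".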
sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  my $arg = pop;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}
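# AUTOLOAD turns any call to an undefined sub into a literal
# instruction: &vshr_u32($T2,$T0,7) appends "vshr.u32\tq10,q8,#7" to
# $code, a bare numeric final argument being rewritten as an immediate.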
my @insns = (&$body,&$body,&$body,&$body);
my ($a,$b,$c,$d,$e,$f,$g,$h);

	&vext_8		($T0,@X[0],@X[1],4);	# X[1..4]
	&vext_8		($T1,@X[2],@X[3],4);	# X[9..12]
	&vshr_u32	($T2,$T0,$sigma0[0]);
	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += X[9..12]
	&vshr_u32	($T1,$T0,$sigma0[2]);
	&vsli_32	($T2,$T0,32-$sigma0[0]);
	&vshr_u32	($T3,$T0,$sigma0[1]);
	&vsli_32	($T3,$T0,32-$sigma0[1]);
	&vshr_u32	($T4,&Dhi(@X[3]),$sigma1[0]);
	&veor		($T1,$T1,$T3);		# sigma0(X[1..4])
	&vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[0]);
	&vshr_u32	($T5,&Dhi(@X[3]),$sigma1[2]);
	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += sigma0(X[1..4])
	&vshr_u32	($T4,&Dhi(@X[3]),$sigma1[1]);
	&vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[1]);
	&veor		($T5,$T5,$T4);		# sigma1(X[14..15])
	&vadd_i32	(&Dlo(@X[0]),&Dlo(@X[0]),$T5);	# X[0..1] += sigma1(X[14..15])
	&vshr_u32	($T4,&Dlo(@X[0]),$sigma1[0]);
	&vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[0]);
	&vshr_u32	($T5,&Dlo(@X[0]),$sigma1[2]);
	&vshr_u32	($T4,&Dlo(@X[0]),$sigma1[1]);
	&vld1_32	("{$T0}","[$Ktbl,:128]!");
	&vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[1]);
	&veor		($T5,$T5,$T4);		# sigma1(X[16..17])
	&vadd_i32	(&Dhi(@X[0]),&Dhi(@X[0]),$T5);	# X[2..3] += sigma1(X[16..17])
	&vadd_i32	($T0,$T0,@X[0]);
	while($#insns>=2) { eval(shift(@insns)); }
	&vst1_32	("{$T0}","[$Xfer,:128]!");

	push(@X,shift(@X));		# "rotate" X[]
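# Each &$body call yields the instruction strings of one scalar round;
# the four rounds collected in @insns are drained one instruction at a
# time between the NEON schedule operations above, so the integer and
# vector pipelines run in parallel.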
my @insns = (&$body,&$body,&$body,&$body);
my ($a,$b,$c,$d,$e,$f,$g,$h);

	&vld1_32	("{$T0}","[$Ktbl,:128]!");
	&vrev32_8	(@X[0],@X[0]);
	&vadd_i32	($T0,$T0,@X[0]);
	foreach (@insns) { eval; }	# remaining instructions
	&vst1_32	("{$T0}","[$Xfer,:128]!");

	push(@X,shift(@X));		# "rotate" X[]
	'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
	'&add	($h,$h,$t1)',			# h+=X[i]+K[i]
	'&eor	($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
	'&add	($a,$a,$t2)',			# h+=Maj(a,b,c) from the past
	'&eor	($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',	# Sigma1(e)
	'&eor	($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
	'&eor	($t1,$t1,$g)',			# Ch(e,f,g)
	'&add	($h,$h,$t2,"ror#$Sigma1[0]")',	# h+=Sigma1(e)
	'&eor	($t2,$a,$b)',			# a^b, b^c in next round
	'&eor	($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',	# Sigma0(a)
	'&add	($h,$h,$t1)',			# h+=Ch(e,f,g)
	'&ldr	($t1,sprintf "[sp,#%d]",4*(($j+1)&15))	if (($j&15)!=15);'.
	'&ldr	($t1,"[$Ktbl]")				if ($j==15);'.
	'&ldr	($t1,"[sp,#64]")			if ($j==31)',
	'&and	($t3,$t3,$t2)',			# (b^c)&=(a^b)
	'&add	($d,$d,$h)',			# d+=h
	'&add	($h,$h,$t0,"ror#$Sigma0[0]");'.	# h+=Sigma0(a)
	'&eor	($t3,$t3,$b)',			# Maj(a,b,c)
	'$j++;	unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
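# body_00_15 is kept as a list of code strings rather than emitted
# directly, so Xupdate/Xpreload can eval one string at a time and
# interleave the scalar round with NEON code. $j counts rounds, @V is
# rotated as usual, and ($t2,$t3) are swapped every round so the
# deferred Maj add always lands in the correct temporary.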
#if __ARM_MAX_ARCH__>=7

.global	sha256_block_data_order_neon
.type	sha256_block_data_order_neon,%function
sha256_block_data_order_neon:
.LNEON:
	stmdb	sp!,{r4-r12,lr}

	sub	$H,sp,#16*4+16
	adr	$Ktbl,K256
	bic	$H,$H,#15		@ align for 128-bit stores
	mov	$t2,sp
	mov	sp,$H			@ alloca
	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp

	vld1.8		{@X[0]},[$inp]!
	vld1.8		{@X[1]},[$inp]!
	vld1.8		{@X[2]},[$inp]!
	vld1.8		{@X[3]},[$inp]!
	vld1.32		{$T0},[$Ktbl,:128]!
	vld1.32		{$T1},[$Ktbl,:128]!
	vld1.32		{$T2},[$Ktbl,:128]!
	vld1.32		{$T3},[$Ktbl,:128]!
	vrev32.8	@X[0],@X[0]		@ yes, even on
	str		$ctx,[sp,#64]
	vrev32.8	@X[1],@X[1]		@ big-endian
	str		$inp,[sp,#68]
	mov		$Xfer,sp
	vrev32.8	@X[2],@X[2]
	str		$len,[sp,#72]
	vrev32.8	@X[3],@X[3]
	str		$t2,[sp,#76]		@ save original sp
	vadd.i32	$T0,$T0,@X[0]
	vadd.i32	$T1,$T1,@X[1]
	vst1.32		{$T0},[$Xfer,:128]!
	vadd.i32	$T2,$T2,@X[2]
	vst1.32		{$T1},[$Xfer,:128]!
	vadd.i32	$T3,$T3,@X[3]
	vst1.32		{$T2},[$Xfer,:128]!
	vst1.32		{$T3},[$Xfer,:128]!
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
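# Each Xupdate call emits four rounds plus the schedule update for the
# next four message words, so the emitted block covers 16 rounds; it
# is executed three times for rounds 0..47, and the Xpreload calls
# below finish rounds 48..63, where no further expansion is needed.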
	teq	$t1,#0				@ check for K256 terminator
	sub	$Ktbl,$Ktbl,#256		@ rewind $Ktbl
	subeq	$inp,$inp,#64			@ avoid SEGV
	vld1.8		{@X[0]},[$inp]!		@ load next input block
	vld1.8		{@X[1]},[$inp]!
	vld1.8		{@X[2]},[$inp]!
	vld1.8		{@X[3]},[$inp]!

	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);

	add	$A,$A,$t2			@ h+=Maj(a,b,c) from the past
	add	$A,$A,$t0			@ accumulate
	ldreq	sp,[sp,#76]			@ restore original sp
	ldmia	sp!,{r4-r12,pc}
.size	sha256_block_data_order_neon,.-sha256_block_data_order_neon
######################################################################

my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2));
my @MSG=map("q$_",(8..11));
my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
# if defined(__thumb2__)
#  define INST(a,b,c,d)	.byte	c,d|0xc,a,b
# else
#  define INST(a,b,c,d)	.byte	a,b,c,d
# endif
.type	sha256_block_data_order_armv8,%function
sha256_block_data_order_armv8:
.LARMv8:
	vld1.32	{$ABCD,$EFGH},[$ctx]
# ifdef	__thumb2__
	adr	$Ktbl,.LARMv8
	sub	$Ktbl,$Ktbl,#.LARMv8-K256
# else
	adr	$Ktbl,K256
# endif
	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp

.Loop_v8:
	vld1.8		{@MSG[0]-@MSG[1]},[$inp]!
	vld1.8		{@MSG[2]-@MSG[3]},[$inp]!
	vld1.32		{$W0},[$Ktbl]!
	vrev32.8	@MSG[0],@MSG[0]
	vrev32.8	@MSG[1],@MSG[1]
	vrev32.8	@MSG[2],@MSG[2]
	vrev32.8	@MSG[3],@MSG[3]
	vmov		$ABCD_SAVE,$ABCD	@ offload
	vmov		$EFGH_SAVE,$EFGH
for($i=0;$i<12;$i++) {
$code.=<<___;
	vld1.32		{$W1},[$Ktbl]!
	vadd.i32	$W0,$W0,@MSG[0]
	sha256su0	@MSG[0],@MSG[1]
	vmov		$abcd,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0
	sha256su1	@MSG[0],@MSG[2],@MSG[3]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
}
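# Twelve iterations run the sha256su0/sha256su1 message expansion while
# hashing; the four groups of four rounds below consume the already
# expanded words, for 16*4 = 64 rounds per 512-bit block.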
$code.=<<___;
	vld1.32		{$W1},[$Ktbl]!
	vadd.i32	$W0,$W0,@MSG[0]
	vmov		$abcd,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	vld1.32		{$W0},[$Ktbl]!
	vadd.i32	$W1,$W1,@MSG[1]
	vmov		$abcd,$ABCD
	sha256h		$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	vld1.32		{$W1},[$Ktbl]
	vadd.i32	$W0,$W0,@MSG[2]
	sub	$Ktbl,$Ktbl,#256-16	@ rewind
	vmov		$abcd,$ABCD
	sha256h		$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	vadd.i32	$W1,$W1,@MSG[3]
	vmov		$abcd,$ABCD
	sha256h		$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	vadd.i32	$ABCD,$ABCD,$ABCD_SAVE
	vadd.i32	$EFGH,$EFGH,$EFGH_SAVE
	teq	$inp,$len
	bne	.Loop_v8

	vst1.32	{$ABCD,$EFGH},[$ctx]

	ret		@ bx lr
.size	sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
.asciz	"SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.comm	OPENSSL_armcap_P,4,4
#endif

	last if (!s/^#/@/ and !/^$/);	# (self-copy loop: emits this file's
					# '#' header as '@' asm comments)

my %opcode = (
	"sha256h"	=> 0xf3000c40,	"sha256h2"	=> 0xf3100c40,
	"sha256su0"	=> 0xf3ba03c0,	"sha256su1"	=> 0xf3200c40	);
my ($mnemonic,$arg)=@_;

if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
					 |(($2&7)<<17)|(($2&8)<<4)
					 |(($3&7)<<1) |(($3&8)<<2);
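    # $word ORs the q-register numbers into the NEON operand fields of
    # the base opcode: Vd[3:1] at bits 15:13 with D at bit 22, Vn[3:1]
    # at bits 19:17 with N at bit 7, Vm[3:1] at bits 3:1 with M at bit 5
    # (q registers alias even d registers, so bit 0 of each field is 0).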
    # The word is emitted byte-by-byte since ARMv7 instructions are
    # always encoded little-endian; the correct solution would be the
    # .inst directive, but older assemblers don't implement it:-(
    sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
foreach (split($/,$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo;

	s/\bret\b/bx	lr/go		or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4
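	# 0xe12fff1e is the fixed ARM encoding of "bx lr"; emitting it
	# as a raw .word keeps the output assemblable even with
	# -march=armv4, where the assembler rejects the bx mnemonic.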
	print $_,$/;
}

close STDOUT;			# enforce flush