1
2 @ ====================================================================
3 @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
4 @ project. The module is, however, dual licensed under OpenSSL and
5 @ CRYPTOGAMS licenses depending on where you obtain it. For further
6 @ details see http://www.openssl.org/~appro/cryptogams/.
7 @
8 @ Permission to use under GPL terms is granted.
9 @ ====================================================================
10
11 @ SHA512 block procedure for ARMv4. September 2007.
12
13 @ This code is ~4.5 (four and a half) times faster than code generated
14 @ by gcc 3.4 and it spends ~72 clock cycles per byte [on a single-issue
15 @ Xscale PXA250 core].
16 @
17 @ July 2010.
18 @
19 @ Rescheduling for the dual-issue pipeline resulted in a 6% improvement on
20 @ the Cortex A8 core and ~40 cycles per processed byte.
21
22 @ February 2011.
23 @
24 @ Profiler-assisted and platform-specific optimization resulted in a 7%
25 @ improvement on the Cortex A8 core and ~38 cycles per byte.
26
27 @ March 2011.
28 @
29 @ Added a NEON implementation. On Cortex A8 it was measured to process
30 @ one byte in 23.3 cycles, or ~60% faster than the integer-only code.
31
32 @ August 2012.
33 @
34 @ Improved NEON performance by 12% on Snapdragon S4. In absolute
35 @ terms it's 22.6 cycles per byte, which is a disappointing result.
36 @ Technical writers asserted that the 3-way S4 pipeline can sustain
37 @ multiple NEON instructions per cycle, but dual NEON issue could
38 @ not be observed; see http://www.openssl.org/~appro/Snapdragon-S4.html
39 @ for further details. On a side note, Cortex-A15 processes one byte in
40 @ 16 cycles.
41
42 @ Byte order [in]dependence. =========================================
43 @
44 @ Originally the caller was expected to maintain a specific *dword* order
45 @ in h[0-7], namely with the most significant dword at the *lower* address,
46 @ which was reflected in the two parameters below as 0 and 4. Now the caller
47 @ is expected to maintain native byte order for whole 64-bit values.
48 #ifndef __KERNEL__
49 # include "arm_arch.h"
50 # define VFP_ABI_PUSH   vstmdb  sp!,{d8-d15}
51 # define VFP_ABI_POP    vldmia  sp!,{d8-d15}
52 #else
53 # define __ARM_ARCH__ __LINUX_ARM_ARCH__
54 # define __ARM_MAX_ARCH__ 7
55 # define VFP_ABI_PUSH
56 # define VFP_ABI_POP
57 #endif
58
59 #ifdef __ARMEL__
60 # define LO 0
61 # define HI 4
62 # define WORD64(hi0,lo0,hi1,lo1)        .word   lo0,hi0, lo1,hi1
63 #else
64 # define HI 0
65 # define LO 4
66 # define WORD64(hi0,lo0,hi1,lo1)        .word   hi0,lo0, hi1,lo1
67 #endif
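@ Note (illustration): with LO/HI and WORD64 defined this way a 64-bit
@ value hi:lo is always fetched as lo=[addr+LO], hi=[addr+HI]; e.g.
@ K[0] = 0x428a2f98d728ae22 is emitted as .word 0xd728ae22,0x428a2f98 on
@ little-endian and .word 0x428a2f98,0xd728ae22 on big-endian, so the
@ integer code stays byte-order agnostic.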
68
69 .text
70 #if __ARM_ARCH__<7
71 .code   32
72 #else
73 .syntax unified
74 # ifdef __thumb2__
75 #  define adrl adr
76 .thumb
77 # else
78 .code   32
79 # endif
80 #endif
81
82 .type   K512,%object
83 .align  5
84 K512:
85 WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
86 WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
87 WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
88 WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
89 WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
90 WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
91 WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
92 WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
93 WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
94 WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
95 WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
96 WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
97 WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
98 WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
99 WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
100 WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
101 WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
102 WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
103 WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
104 WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
105 WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
106 WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
107 WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
108 WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
109 WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
110 WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
111 WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
112 WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
113 WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
114 WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
115 WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
116 WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
117 WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
118 WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
119 WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
120 WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
121 WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
122 WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
123 WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
124 WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
125 .size   K512,.-K512
126 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
127 .LOPENSSL_armcap:
128 .word   OPENSSL_armcap_P-sha512_block_data_order
129 .skip   32-4
130 #else
131 .skip   32
132 #endif
133
134 .global sha512_block_data_order
135 .type   sha512_block_data_order,%function
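@ Note: the arguments follow the usual CRYPTOGAMS convention for this
@ routine: r0 = pointer to the eight 64-bit state words h[0-7],
@ r1 = input data, r2 = number of 128-byte blocks (hence r2,lsl#7 below).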
136 sha512_block_data_order:
137 .Lsha512_block_data_order:
138 #if __ARM_ARCH__<7
139         sub     r3,pc,#8                @ sha512_block_data_order
140 #else
141         adr     r3,.Lsha512_block_data_order
142 #endif
143 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
144         ldr     r12,.LOPENSSL_armcap
145         ldr     r12,[r3,r12]            @ OPENSSL_armcap_P
146         tst     r12,#1
147         bne     .LNEON
148 #endif
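@ Note: outside the kernel, .LOPENSSL_armcap holds the PC-relative offset
@ of OPENSSL_armcap_P; bit 0 of that capability word (OpenSSL's NEON
@ flag) diverts execution to the NEON body at .LNEON, otherwise the
@ integer-only code below runs.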
149         add     r2,r1,r2,lsl#7  @ len to point at the end of inp
150         stmdb   sp!,{r4-r12,lr}
151         sub     r14,r3,#672             @ K512
152         sub     sp,sp,#9*8
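@ Note (layout): the integer code keeps its working state on the stack:
@ sp+#0..#56 hold a..h (low word at +0, high word at +4) and sp+#64 the
@ current X[i]; each round then pushes 8 more bytes (sub sp,sp,#8), so
@ after 80 rounds the frame has grown by 640 bytes, popped again with
@ add sp,sp,#640 before the next block.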
153
154         ldr     r7,[r0,#32+LO]
155         ldr     r8,[r0,#32+HI]
156         ldr     r9, [r0,#48+LO]
157         ldr     r10, [r0,#48+HI]
158         ldr     r11, [r0,#56+LO]
159         ldr     r12, [r0,#56+HI]
160 .Loop:
161         str     r9, [sp,#48+0]
162         str     r10, [sp,#48+4]
163         str     r11, [sp,#56+0]
164         str     r12, [sp,#56+4]
165         ldr     r5,[r0,#0+LO]
166         ldr     r6,[r0,#0+HI]
167         ldr     r3,[r0,#8+LO]
168         ldr     r4,[r0,#8+HI]
169         ldr     r9, [r0,#16+LO]
170         ldr     r10, [r0,#16+HI]
171         ldr     r11, [r0,#24+LO]
172         ldr     r12, [r0,#24+HI]
173         str     r3,[sp,#8+0]
174         str     r4,[sp,#8+4]
175         str     r9, [sp,#16+0]
176         str     r10, [sp,#16+4]
177         str     r11, [sp,#24+0]
178         str     r12, [sp,#24+4]
179         ldr     r3,[r0,#40+LO]
180         ldr     r4,[r0,#40+HI]
181         str     r3,[sp,#40+0]
182         str     r4,[sp,#40+4]
183
184 .L00_15:
185 #if __ARM_ARCH__<7
186         ldrb    r3,[r1,#7]
187         ldrb    r9, [r1,#6]
188         ldrb    r10, [r1,#5]
189         ldrb    r11, [r1,#4]
190         ldrb    r4,[r1,#3]
191         ldrb    r12, [r1,#2]
192         orr     r3,r3,r9,lsl#8
193         ldrb    r9, [r1,#1]
194         orr     r3,r3,r10,lsl#16
195         ldrb    r10, [r1],#8
196         orr     r3,r3,r11,lsl#24
197         orr     r4,r4,r12,lsl#8
198         orr     r4,r4,r9,lsl#16
199         orr     r4,r4,r10,lsl#24
200 #else
201         ldr     r3,[r1,#4]
202         ldr     r4,[r1],#8
203 #ifdef __ARMEL__
204         rev     r3,r3
205         rev     r4,r4
206 #endif
207 #endif
208         @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
209         @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
210         @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
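@ Note: a 64-bit ROTR by n<32 splits into 32-bit halves as
@       lo' = lo>>n | hi<<(32-n),  hi' = hi>>n | lo<<(32-n)
@ and for n>=32 the halves swap roles, e.g. ROTR(x,41) = ROTR(x,32+9):
@       lo' = hi>>9 | lo<<23,      hi' = lo>>9 | hi<<23
@ which is how the LO/HI expressions in these Sigma/sigma comments arise.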
211         mov     r9,r7,lsr#14
212         str     r3,[sp,#64+0]
213         mov     r10,r8,lsr#14
214         str     r4,[sp,#64+4]
215         eor     r9,r9,r8,lsl#18
216         ldr     r11,[sp,#56+0]  @ h.lo
217         eor     r10,r10,r7,lsl#18
218         ldr     r12,[sp,#56+4]  @ h.hi
219         eor     r9,r9,r7,lsr#18
220         eor     r10,r10,r8,lsr#18
221         eor     r9,r9,r8,lsl#14
222         eor     r10,r10,r7,lsl#14
223         eor     r9,r9,r8,lsr#9
224         eor     r10,r10,r7,lsr#9
225         eor     r9,r9,r7,lsl#23
226         eor     r10,r10,r8,lsl#23       @ Sigma1(e)
227         adds    r3,r3,r9
228         ldr     r9,[sp,#40+0]   @ f.lo
229         adc     r4,r4,r10               @ T += Sigma1(e)
230         ldr     r10,[sp,#40+4]  @ f.hi
231         adds    r3,r3,r11
232         ldr     r11,[sp,#48+0]  @ g.lo
233         adc     r4,r4,r12               @ T += h
234         ldr     r12,[sp,#48+4]  @ g.hi
235
236         eor     r9,r9,r11
237         str     r7,[sp,#32+0]
238         eor     r10,r10,r12
239         str     r8,[sp,#32+4]
240         and     r9,r9,r7
241         str     r5,[sp,#0+0]
242         and     r10,r10,r8
243         str     r6,[sp,#0+4]
244         eor     r9,r9,r11
245         ldr     r11,[r14,#LO]   @ K[i].lo
246         eor     r10,r10,r12             @ Ch(e,f,g)
247         ldr     r12,[r14,#HI]   @ K[i].hi
248
249         adds    r3,r3,r9
250         ldr     r7,[sp,#24+0]   @ d.lo
251         adc     r4,r4,r10               @ T += Ch(e,f,g)
252         ldr     r8,[sp,#24+4]   @ d.hi
253         adds    r3,r3,r11
254         and     r9,r11,#0xff
255         adc     r4,r4,r12               @ T += K[i]
256         adds    r7,r7,r3
257         ldr     r11,[sp,#8+0]   @ b.lo
258         adc     r8,r8,r4                @ d += T
259         teq     r9,#148
260
261         ldr     r12,[sp,#16+0]  @ c.lo
262 #if __ARM_ARCH__>=7
263         it      eq                      @ Thumb2 thing, sanity check in ARM
264 #endif
265         orreq   r14,r14,#1
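@ Note: r9 is the low byte of K[i].lo; among K[0..15] it equals 148
@ (0x94) only for K[15], so bit 0 of the K pointer in r14 is set here as
@ an exit flag and tested at the bottom of .L00_15 (the teq r9,#23 in
@ .L16_79 does the same for K[79], whose low word ends in 0x17).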
266         @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
267         @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
268         @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
269         mov     r9,r5,lsr#28
270         mov     r10,r6,lsr#28
271         eor     r9,r9,r6,lsl#4
272         eor     r10,r10,r5,lsl#4
273         eor     r9,r9,r6,lsr#2
274         eor     r10,r10,r5,lsr#2
275         eor     r9,r9,r5,lsl#30
276         eor     r10,r10,r6,lsl#30
277         eor     r9,r9,r6,lsr#7
278         eor     r10,r10,r5,lsr#7
279         eor     r9,r9,r5,lsl#25
280         eor     r10,r10,r6,lsl#25       @ Sigma0(a)
281         adds    r3,r3,r9
282         and     r9,r5,r11
283         adc     r4,r4,r10               @ T += Sigma0(a)
284
285         ldr     r10,[sp,#8+4]   @ b.hi
286         orr     r5,r5,r11
287         ldr     r11,[sp,#16+4]  @ c.hi
288         and     r5,r5,r12
289         and     r12,r6,r10
290         orr     r6,r6,r10
291         orr     r5,r5,r9                @ Maj(a,b,c).lo
292         and     r6,r6,r11
293         adds    r5,r5,r3
294         orr     r6,r6,r12               @ Maj(a,b,c).hi
295         sub     sp,sp,#8
296         adc     r6,r6,r4                @ h += T
297         tst     r14,#1
298         add     r14,r14,#8
299         tst     r14,#1
300         beq     .L00_15
301         ldr     r9,[sp,#184+0]
302         ldr     r10,[sp,#184+4]
303         bic     r14,r14,#1
304 .L16_79:
305         @ sigma0(x)     (ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
306         @ LO            lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
307         @ HI            hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
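@ Note: the third term is a plain shift, not a rotation, so only the LO
@ half picks up bits from HI (hi<<25); the HI half is simply hi>>7.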
308         mov     r3,r9,lsr#1
309         ldr     r11,[sp,#80+0]
310         mov     r4,r10,lsr#1
311         ldr     r12,[sp,#80+4]
312         eor     r3,r3,r10,lsl#31
313         eor     r4,r4,r9,lsl#31
314         eor     r3,r3,r9,lsr#8
315         eor     r4,r4,r10,lsr#8
316         eor     r3,r3,r10,lsl#24
317         eor     r4,r4,r9,lsl#24
318         eor     r3,r3,r9,lsr#7
319         eor     r4,r4,r10,lsr#7
320         eor     r3,r3,r10,lsl#25
321
322         @ sigma1(x)     (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
323         @ LO            lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
324         @ HI            hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
325         mov     r9,r11,lsr#19
326         mov     r10,r12,lsr#19
327         eor     r9,r9,r12,lsl#13
328         eor     r10,r10,r11,lsl#13
329         eor     r9,r9,r12,lsr#29
330         eor     r10,r10,r11,lsr#29
331         eor     r9,r9,r11,lsl#3
332         eor     r10,r10,r12,lsl#3
333         eor     r9,r9,r11,lsr#6
334         eor     r10,r10,r12,lsr#6
335         ldr     r11,[sp,#120+0]
336         eor     r9,r9,r12,lsl#26
337
338         ldr     r12,[sp,#120+4]
339         adds    r3,r3,r9
340         ldr     r9,[sp,#192+0]
341         adc     r4,r4,r10
342
343         ldr     r10,[sp,#192+4]
344         adds    r3,r3,r11
345         adc     r4,r4,r12
346         adds    r3,r3,r9
347         adc     r4,r4,r10
348         @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
349         @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
350         @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
351         mov     r9,r7,lsr#14
352         str     r3,[sp,#64+0]
353         mov     r10,r8,lsr#14
354         str     r4,[sp,#64+4]
355         eor     r9,r9,r8,lsl#18
356         ldr     r11,[sp,#56+0]  @ h.lo
357         eor     r10,r10,r7,lsl#18
358         ldr     r12,[sp,#56+4]  @ h.hi
359         eor     r9,r9,r7,lsr#18
360         eor     r10,r10,r8,lsr#18
361         eor     r9,r9,r8,lsl#14
362         eor     r10,r10,r7,lsl#14
363         eor     r9,r9,r8,lsr#9
364         eor     r10,r10,r7,lsr#9
365         eor     r9,r9,r7,lsl#23
366         eor     r10,r10,r8,lsl#23       @ Sigma1(e)
367         adds    r3,r3,r9
368         ldr     r9,[sp,#40+0]   @ f.lo
369         adc     r4,r4,r10               @ T += Sigma1(e)
370         ldr     r10,[sp,#40+4]  @ f.hi
371         adds    r3,r3,r11
372         ldr     r11,[sp,#48+0]  @ g.lo
373         adc     r4,r4,r12               @ T += h
374         ldr     r12,[sp,#48+4]  @ g.hi
375
376         eor     r9,r9,r11
377         str     r7,[sp,#32+0]
378         eor     r10,r10,r12
379         str     r8,[sp,#32+4]
380         and     r9,r9,r7
381         str     r5,[sp,#0+0]
382         and     r10,r10,r8
383         str     r6,[sp,#0+4]
384         eor     r9,r9,r11
385         ldr     r11,[r14,#LO]   @ K[i].lo
386         eor     r10,r10,r12             @ Ch(e,f,g)
387         ldr     r12,[r14,#HI]   @ K[i].hi
388
389         adds    r3,r3,r9
390         ldr     r7,[sp,#24+0]   @ d.lo
391         adc     r4,r4,r10               @ T += Ch(e,f,g)
392         ldr     r8,[sp,#24+4]   @ d.hi
393         adds    r3,r3,r11
394         and     r9,r11,#0xff
395         adc     r4,r4,r12               @ T += K[i]
396         adds    r7,r7,r3
397         ldr     r11,[sp,#8+0]   @ b.lo
398         adc     r8,r8,r4                @ d += T
399         teq     r9,#23
400
401         ldr     r12,[sp,#16+0]  @ c.lo
402 #if __ARM_ARCH__>=7
403         it      eq                      @ Thumb2 thing, sanity check in ARM
404 #endif
405         orreq   r14,r14,#1
406         @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
407         @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
408         @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
409         mov     r9,r5,lsr#28
410         mov     r10,r6,lsr#28
411         eor     r9,r9,r6,lsl#4
412         eor     r10,r10,r5,lsl#4
413         eor     r9,r9,r6,lsr#2
414         eor     r10,r10,r5,lsr#2
415         eor     r9,r9,r5,lsl#30
416         eor     r10,r10,r6,lsl#30
417         eor     r9,r9,r6,lsr#7
418         eor     r10,r10,r5,lsr#7
419         eor     r9,r9,r5,lsl#25
420         eor     r10,r10,r6,lsl#25       @ Sigma0(a)
421         adds    r3,r3,r9
422         and     r9,r5,r11
423         adc     r4,r4,r10               @ T += Sigma0(a)
424
425         ldr     r10,[sp,#8+4]   @ b.hi
426         orr     r5,r5,r11
427         ldr     r11,[sp,#16+4]  @ c.hi
428         and     r5,r5,r12
429         and     r12,r6,r10
430         orr     r6,r6,r10
431         orr     r5,r5,r9                @ Maj(a,b,c).lo
432         and     r6,r6,r11
433         adds    r5,r5,r3
434         orr     r6,r6,r12               @ Maj(a,b,c).hi
435         sub     sp,sp,#8
436         adc     r6,r6,r4                @ h += T
437         tst     r14,#1
438         add     r14,r14,#8
439 #if __ARM_ARCH__>=7
440         ittt    eq                      @ Thumb2 thing, sanity check in ARM
441 #endif
442         ldreq   r9,[sp,#184+0]
443         ldreq   r10,[sp,#184+4]
444         beq     .L16_79
445         bic     r14,r14,#1
446
447         ldr     r3,[sp,#8+0]
448         ldr     r4,[sp,#8+4]
449         ldr     r9, [r0,#0+LO]
450         ldr     r10, [r0,#0+HI]
451         ldr     r11, [r0,#8+LO]
452         ldr     r12, [r0,#8+HI]
453         adds    r9,r5,r9
454         str     r9, [r0,#0+LO]
455         adc     r10,r6,r10
456         str     r10, [r0,#0+HI]
457         adds    r11,r3,r11
458         str     r11, [r0,#8+LO]
459         adc     r12,r4,r12
460         str     r12, [r0,#8+HI]
461
462         ldr     r5,[sp,#16+0]
463         ldr     r6,[sp,#16+4]
464         ldr     r3,[sp,#24+0]
465         ldr     r4,[sp,#24+4]
466         ldr     r9, [r0,#16+LO]
467         ldr     r10, [r0,#16+HI]
468         ldr     r11, [r0,#24+LO]
469         ldr     r12, [r0,#24+HI]
470         adds    r9,r5,r9
471         str     r9, [r0,#16+LO]
472         adc     r10,r6,r10
473         str     r10, [r0,#16+HI]
474         adds    r11,r3,r11
475         str     r11, [r0,#24+LO]
476         adc     r12,r4,r12
477         str     r12, [r0,#24+HI]
478
479         ldr     r3,[sp,#40+0]
480         ldr     r4,[sp,#40+4]
481         ldr     r9, [r0,#32+LO]
482         ldr     r10, [r0,#32+HI]
483         ldr     r11, [r0,#40+LO]
484         ldr     r12, [r0,#40+HI]
485         adds    r7,r7,r9
486         str     r7,[r0,#32+LO]
487         adc     r8,r8,r10
488         str     r8,[r0,#32+HI]
489         adds    r11,r3,r11
490         str     r11, [r0,#40+LO]
491         adc     r12,r4,r12
492         str     r12, [r0,#40+HI]
493
494         ldr     r5,[sp,#48+0]
495         ldr     r6,[sp,#48+4]
496         ldr     r3,[sp,#56+0]
497         ldr     r4,[sp,#56+4]
498         ldr     r9, [r0,#48+LO]
499         ldr     r10, [r0,#48+HI]
500         ldr     r11, [r0,#56+LO]
501         ldr     r12, [r0,#56+HI]
502         adds    r9,r5,r9
503         str     r9, [r0,#48+LO]
504         adc     r10,r6,r10
505         str     r10, [r0,#48+HI]
506         adds    r11,r3,r11
507         str     r11, [r0,#56+LO]
508         adc     r12,r4,r12
509         str     r12, [r0,#56+HI]
510
511         add     sp,sp,#640
512         sub     r14,r14,#640
513
514         teq     r1,r2
515         bne     .Loop
516
517         add     sp,sp,#8*9              @ destroy frame
518 #if __ARM_ARCH__>=5
519         ldmia   sp!,{r4-r12,pc}
520 #else
521         ldmia   sp!,{r4-r12,lr}
522         tst     lr,#1
523         moveq   pc,lr                   @ be binary compatible with V4, yet
524         .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
525 #endif
526 .size   sha512_block_data_order,.-sha512_block_data_order
527 #if __ARM_MAX_ARCH__>=7
528 .arch   armv7-a
529 .fpu    neon
530
531 .global sha512_block_data_order_neon
532 .type   sha512_block_data_order_neon,%function
533 .align  4
534 sha512_block_data_order_neon:
535 .LNEON:
536         dmb                             @ errata #451034 on early Cortex A8
537         add     r2,r1,r2,lsl#7  @ len to point at the end of inp
538         VFP_ABI_PUSH
539         adrl    r3,K512
540         vldmia  r0,{d16-d23}            @ load context
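@ Note: in the NEON code each 64-bit rotation is a vshr.u64/vsli.64 pair
@ whose shift counts sum to 64, e.g. vshr.u64 d24,d20,#14 followed by
@ vsli.64 d24,d20,#50 yields ROTR(e,14): vsli keeps the 50 low bits
@ already in d24 and inserts e<<50 above them.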
541 .Loop_neon:
542         vshr.u64        d24,d20,#14     @ 0
543 #if 0<16
544         vld1.64         {d0},[r1]!      @ handles unaligned
545 #endif
546         vshr.u64        d25,d20,#18
547 #if 0>0
548          vadd.i64       d16,d30                 @ h+=Maj from the past
549 #endif
550         vshr.u64        d26,d20,#41
551         vld1.64         {d28},[r3,:64]! @ K[i++]
552         vsli.64         d24,d20,#50
553         vsli.64         d25,d20,#46
554         vmov            d29,d20
555         vsli.64         d26,d20,#23
556 #if 0<16 && defined(__ARMEL__)
557         vrev64.8        d0,d0
558 #endif
559         veor            d25,d24
560         vbsl            d29,d21,d22             @ Ch(e,f,g)
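@ Note: d29 was preloaded with e (vmov above), so this vbsl computes
@ (e&f)|(~e&g), which is exactly Ch(e,f,g).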
561         vshr.u64        d24,d16,#28
562         veor            d26,d25                 @ Sigma1(e)
563         vadd.i64        d27,d29,d23
564         vshr.u64        d25,d16,#34
565         vsli.64         d24,d16,#36
566         vadd.i64        d27,d26
567         vshr.u64        d26,d16,#39
568         vadd.i64        d28,d0
569         vsli.64         d25,d16,#30
570         veor            d30,d16,d17
571         vsli.64         d26,d16,#25
572         veor            d23,d24,d25
573         vadd.i64        d27,d28
574         vbsl            d30,d18,d17             @ Maj(a,b,c)
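@ Note: d30 was set to a^b (veor above), so this vbsl picks c where a and
@ b differ and b(=a) where they agree, i.e. Maj(a,b,c); its addition into
@ the state is deferred by one round (the "h+=Maj from the past" adds).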
575         veor            d23,d26                 @ Sigma0(a)
576         vadd.i64        d19,d27
577         vadd.i64        d30,d27
578         @ vadd.i64      d23,d30
579         vshr.u64        d24,d19,#14     @ 1
580 #if 1<16
581         vld1.64         {d1},[r1]!      @ handles unaligned
582 #endif
583         vshr.u64        d25,d19,#18
584 #if 1>0
585          vadd.i64       d23,d30                 @ h+=Maj from the past
586 #endif
587         vshr.u64        d26,d19,#41
588         vld1.64         {d28},[r3,:64]! @ K[i++]
589         vsli.64         d24,d19,#50
590         vsli.64         d25,d19,#46
591         vmov            d29,d19
592         vsli.64         d26,d19,#23
593 #if 1<16 && defined(__ARMEL__)
594         vrev64.8        d1,d1
595 #endif
596         veor            d25,d24
597         vbsl            d29,d20,d21             @ Ch(e,f,g)
598         vshr.u64        d24,d23,#28
599         veor            d26,d25                 @ Sigma1(e)
600         vadd.i64        d27,d29,d22
601         vshr.u64        d25,d23,#34
602         vsli.64         d24,d23,#36
603         vadd.i64        d27,d26
604         vshr.u64        d26,d23,#39
605         vadd.i64        d28,d1
606         vsli.64         d25,d23,#30
607         veor            d30,d23,d16
608         vsli.64         d26,d23,#25
609         veor            d22,d24,d25
610         vadd.i64        d27,d28
611         vbsl            d30,d17,d16             @ Maj(a,b,c)
612         veor            d22,d26                 @ Sigma0(a)
613         vadd.i64        d18,d27
614         vadd.i64        d30,d27
615         @ vadd.i64      d22,d30
616         vshr.u64        d24,d18,#14     @ 2
617 #if 2<16
618         vld1.64         {d2},[r1]!      @ handles unaligned
619 #endif
620         vshr.u64        d25,d18,#18
621 #if 2>0
622          vadd.i64       d22,d30                 @ h+=Maj from the past
623 #endif
624         vshr.u64        d26,d18,#41
625         vld1.64         {d28},[r3,:64]! @ K[i++]
626         vsli.64         d24,d18,#50
627         vsli.64         d25,d18,#46
628         vmov            d29,d18
629         vsli.64         d26,d18,#23
630 #if 2<16 && defined(__ARMEL__)
631         vrev64.8        d2,d2
632 #endif
633         veor            d25,d24
634         vbsl            d29,d19,d20             @ Ch(e,f,g)
635         vshr.u64        d24,d22,#28
636         veor            d26,d25                 @ Sigma1(e)
637         vadd.i64        d27,d29,d21
638         vshr.u64        d25,d22,#34
639         vsli.64         d24,d22,#36
640         vadd.i64        d27,d26
641         vshr.u64        d26,d22,#39
642         vadd.i64        d28,d2
643         vsli.64         d25,d22,#30
644         veor            d30,d22,d23
645         vsli.64         d26,d22,#25
646         veor            d21,d24,d25
647         vadd.i64        d27,d28
648         vbsl            d30,d16,d23             @ Maj(a,b,c)
649         veor            d21,d26                 @ Sigma0(a)
650         vadd.i64        d17,d27
651         vadd.i64        d30,d27
652         @ vadd.i64      d21,d30
653         vshr.u64        d24,d17,#14     @ 3
654 #if 3<16
655         vld1.64         {d3},[r1]!      @ handles unaligned
656 #endif
657         vshr.u64        d25,d17,#18
658 #if 3>0
659          vadd.i64       d21,d30                 @ h+=Maj from the past
660 #endif
661         vshr.u64        d26,d17,#41
662         vld1.64         {d28},[r3,:64]! @ K[i++]
663         vsli.64         d24,d17,#50
664         vsli.64         d25,d17,#46
665         vmov            d29,d17
666         vsli.64         d26,d17,#23
667 #if 3<16 && defined(__ARMEL__)
668         vrev64.8        d3,d3
669 #endif
670         veor            d25,d24
671         vbsl            d29,d18,d19             @ Ch(e,f,g)
672         vshr.u64        d24,d21,#28
673         veor            d26,d25                 @ Sigma1(e)
674         vadd.i64        d27,d29,d20
675         vshr.u64        d25,d21,#34
676         vsli.64         d24,d21,#36
677         vadd.i64        d27,d26
678         vshr.u64        d26,d21,#39
679         vadd.i64        d28,d3
680         vsli.64         d25,d21,#30
681         veor            d30,d21,d22
682         vsli.64         d26,d21,#25
683         veor            d20,d24,d25
684         vadd.i64        d27,d28
685         vbsl            d30,d23,d22             @ Maj(a,b,c)
686         veor            d20,d26                 @ Sigma0(a)
687         vadd.i64        d16,d27
688         vadd.i64        d30,d27
689         @ vadd.i64      d20,d30
690         vshr.u64        d24,d16,#14     @ 4
691 #if 4<16
692         vld1.64         {d4},[r1]!      @ handles unaligned
693 #endif
694         vshr.u64        d25,d16,#18
695 #if 4>0
696          vadd.i64       d20,d30                 @ h+=Maj from the past
697 #endif
698         vshr.u64        d26,d16,#41
699         vld1.64         {d28},[r3,:64]! @ K[i++]
700         vsli.64         d24,d16,#50
701         vsli.64         d25,d16,#46
702         vmov            d29,d16
703         vsli.64         d26,d16,#23
704 #if 4<16 && defined(__ARMEL__)
705         vrev64.8        d4,d4
706 #endif
707         veor            d25,d24
708         vbsl            d29,d17,d18             @ Ch(e,f,g)
709         vshr.u64        d24,d20,#28
710         veor            d26,d25                 @ Sigma1(e)
711         vadd.i64        d27,d29,d19
712         vshr.u64        d25,d20,#34
713         vsli.64         d24,d20,#36
714         vadd.i64        d27,d26
715         vshr.u64        d26,d20,#39
716         vadd.i64        d28,d4
717         vsli.64         d25,d20,#30
718         veor            d30,d20,d21
719         vsli.64         d26,d20,#25
720         veor            d19,d24,d25
721         vadd.i64        d27,d28
722         vbsl            d30,d22,d21             @ Maj(a,b,c)
723         veor            d19,d26                 @ Sigma0(a)
724         vadd.i64        d23,d27
725         vadd.i64        d30,d27
726         @ vadd.i64      d19,d30
727         vshr.u64        d24,d23,#14     @ 5
728 #if 5<16
729         vld1.64         {d5},[r1]!      @ handles unaligned
730 #endif
731         vshr.u64        d25,d23,#18
732 #if 5>0
733          vadd.i64       d19,d30                 @ h+=Maj from the past
734 #endif
735         vshr.u64        d26,d23,#41
736         vld1.64         {d28},[r3,:64]! @ K[i++]
737         vsli.64         d24,d23,#50
738         vsli.64         d25,d23,#46
739         vmov            d29,d23
740         vsli.64         d26,d23,#23
741 #if 5<16 && defined(__ARMEL__)
742         vrev64.8        d5,d5
743 #endif
744         veor            d25,d24
745         vbsl            d29,d16,d17             @ Ch(e,f,g)
746         vshr.u64        d24,d19,#28
747         veor            d26,d25                 @ Sigma1(e)
748         vadd.i64        d27,d29,d18
749         vshr.u64        d25,d19,#34
750         vsli.64         d24,d19,#36
751         vadd.i64        d27,d26
752         vshr.u64        d26,d19,#39
753         vadd.i64        d28,d5
754         vsli.64         d25,d19,#30
755         veor            d30,d19,d20
756         vsli.64         d26,d19,#25
757         veor            d18,d24,d25
758         vadd.i64        d27,d28
759         vbsl            d30,d21,d20             @ Maj(a,b,c)
760         veor            d18,d26                 @ Sigma0(a)
761         vadd.i64        d22,d27
762         vadd.i64        d30,d27
763         @ vadd.i64      d18,d30
764         vshr.u64        d24,d22,#14     @ 6
765 #if 6<16
766         vld1.64         {d6},[r1]!      @ handles unaligned
767 #endif
768         vshr.u64        d25,d22,#18
769 #if 6>0
770          vadd.i64       d18,d30                 @ h+=Maj from the past
771 #endif
772         vshr.u64        d26,d22,#41
773         vld1.64         {d28},[r3,:64]! @ K[i++]
774         vsli.64         d24,d22,#50
775         vsli.64         d25,d22,#46
776         vmov            d29,d22
777         vsli.64         d26,d22,#23
778 #if 6<16 && defined(__ARMEL__)
779         vrev64.8        d6,d6
780 #endif
781         veor            d25,d24
782         vbsl            d29,d23,d16             @ Ch(e,f,g)
783         vshr.u64        d24,d18,#28
784         veor            d26,d25                 @ Sigma1(e)
785         vadd.i64        d27,d29,d17
786         vshr.u64        d25,d18,#34
787         vsli.64         d24,d18,#36
788         vadd.i64        d27,d26
789         vshr.u64        d26,d18,#39
790         vadd.i64        d28,d6
791         vsli.64         d25,d18,#30
792         veor            d30,d18,d19
793         vsli.64         d26,d18,#25
794         veor            d17,d24,d25
795         vadd.i64        d27,d28
796         vbsl            d30,d20,d19             @ Maj(a,b,c)
797         veor            d17,d26                 @ Sigma0(a)
798         vadd.i64        d21,d27
799         vadd.i64        d30,d27
800         @ vadd.i64      d17,d30
801         vshr.u64        d24,d21,#14     @ 7
802 #if 7<16
803         vld1.64         {d7},[r1]!      @ handles unaligned
804 #endif
805         vshr.u64        d25,d21,#18
806 #if 7>0
807          vadd.i64       d17,d30                 @ h+=Maj from the past
808 #endif
809         vshr.u64        d26,d21,#41
810         vld1.64         {d28},[r3,:64]! @ K[i++]
811         vsli.64         d24,d21,#50
812         vsli.64         d25,d21,#46
813         vmov            d29,d21
814         vsli.64         d26,d21,#23
815 #if 7<16 && defined(__ARMEL__)
816         vrev64.8        d7,d7
817 #endif
818         veor            d25,d24
819         vbsl            d29,d22,d23             @ Ch(e,f,g)
820         vshr.u64        d24,d17,#28
821         veor            d26,d25                 @ Sigma1(e)
822         vadd.i64        d27,d29,d16
823         vshr.u64        d25,d17,#34
824         vsli.64         d24,d17,#36
825         vadd.i64        d27,d26
826         vshr.u64        d26,d17,#39
827         vadd.i64        d28,d7
828         vsli.64         d25,d17,#30
829         veor            d30,d17,d18
830         vsli.64         d26,d17,#25
831         veor            d16,d24,d25
832         vadd.i64        d27,d28
833         vbsl            d30,d19,d18             @ Maj(a,b,c)
834         veor            d16,d26                 @ Sigma0(a)
835         vadd.i64        d20,d27
836         vadd.i64        d30,d27
837         @ vadd.i64      d16,d30
838         vshr.u64        d24,d20,#14     @ 8
839 #if 8<16
840         vld1.64         {d8},[r1]!      @ handles unaligned
841 #endif
842         vshr.u64        d25,d20,#18
843 #if 8>0
844          vadd.i64       d16,d30                 @ h+=Maj from the past
845 #endif
846         vshr.u64        d26,d20,#41
847         vld1.64         {d28},[r3,:64]! @ K[i++]
848         vsli.64         d24,d20,#50
849         vsli.64         d25,d20,#46
850         vmov            d29,d20
851         vsli.64         d26,d20,#23
852 #if 8<16 && defined(__ARMEL__)
853         vrev64.8        d8,d8
854 #endif
855         veor            d25,d24
856         vbsl            d29,d21,d22             @ Ch(e,f,g)
857         vshr.u64        d24,d16,#28
858         veor            d26,d25                 @ Sigma1(e)
859         vadd.i64        d27,d29,d23
860         vshr.u64        d25,d16,#34
861         vsli.64         d24,d16,#36
862         vadd.i64        d27,d26
863         vshr.u64        d26,d16,#39
864         vadd.i64        d28,d8
865         vsli.64         d25,d16,#30
866         veor            d30,d16,d17
867         vsli.64         d26,d16,#25
868         veor            d23,d24,d25
869         vadd.i64        d27,d28
870         vbsl            d30,d18,d17             @ Maj(a,b,c)
871         veor            d23,d26                 @ Sigma0(a)
872         vadd.i64        d19,d27
873         vadd.i64        d30,d27
874         @ vadd.i64      d23,d30
875         vshr.u64        d24,d19,#14     @ 9
876 #if 9<16
877         vld1.64         {d9},[r1]!      @ handles unaligned
878 #endif
879         vshr.u64        d25,d19,#18
880 #if 9>0
881          vadd.i64       d23,d30                 @ h+=Maj from the past
882 #endif
883         vshr.u64        d26,d19,#41
884         vld1.64         {d28},[r3,:64]! @ K[i++]
885         vsli.64         d24,d19,#50
886         vsli.64         d25,d19,#46
887         vmov            d29,d19
888         vsli.64         d26,d19,#23
889 #if 9<16 && defined(__ARMEL__)
890         vrev64.8        d9,d9
891 #endif
892         veor            d25,d24
893         vbsl            d29,d20,d21             @ Ch(e,f,g)
894         vshr.u64        d24,d23,#28
895         veor            d26,d25                 @ Sigma1(e)
896         vadd.i64        d27,d29,d22
897         vshr.u64        d25,d23,#34
898         vsli.64         d24,d23,#36
899         vadd.i64        d27,d26
900         vshr.u64        d26,d23,#39
901         vadd.i64        d28,d9
902         vsli.64         d25,d23,#30
903         veor            d30,d23,d16
904         vsli.64         d26,d23,#25
905         veor            d22,d24,d25
906         vadd.i64        d27,d28
907         vbsl            d30,d17,d16             @ Maj(a,b,c)
908         veor            d22,d26                 @ Sigma0(a)
909         vadd.i64        d18,d27
910         vadd.i64        d30,d27
911         @ vadd.i64      d22,d30
912         vshr.u64        d24,d18,#14     @ 10
913 #if 10<16
914         vld1.64         {d10},[r1]!     @ handles unaligned
915 #endif
916         vshr.u64        d25,d18,#18
917 #if 10>0
918          vadd.i64       d22,d30                 @ h+=Maj from the past
919 #endif
920         vshr.u64        d26,d18,#41
921         vld1.64         {d28},[r3,:64]! @ K[i++]
922         vsli.64         d24,d18,#50
923         vsli.64         d25,d18,#46
924         vmov            d29,d18
925         vsli.64         d26,d18,#23
926 #if 10<16 && defined(__ARMEL__)
927         vrev64.8        d10,d10
928 #endif
929         veor            d25,d24
930         vbsl            d29,d19,d20             @ Ch(e,f,g)
931         vshr.u64        d24,d22,#28
932         veor            d26,d25                 @ Sigma1(e)
933         vadd.i64        d27,d29,d21
934         vshr.u64        d25,d22,#34
935         vsli.64         d24,d22,#36
936         vadd.i64        d27,d26
937         vshr.u64        d26,d22,#39
938         vadd.i64        d28,d10
939         vsli.64         d25,d22,#30
940         veor            d30,d22,d23
941         vsli.64         d26,d22,#25
942         veor            d21,d24,d25
943         vadd.i64        d27,d28
944         vbsl            d30,d16,d23             @ Maj(a,b,c)
945         veor            d21,d26                 @ Sigma0(a)
946         vadd.i64        d17,d27
947         vadd.i64        d30,d27
948         @ vadd.i64      d21,d30
949         vshr.u64        d24,d17,#14     @ 11
950 #if 11<16
951         vld1.64         {d11},[r1]!     @ handles unaligned
952 #endif
953         vshr.u64        d25,d17,#18
954 #if 11>0
955          vadd.i64       d21,d30                 @ h+=Maj from the past
956 #endif
957         vshr.u64        d26,d17,#41
958         vld1.64         {d28},[r3,:64]! @ K[i++]
959         vsli.64         d24,d17,#50
960         vsli.64         d25,d17,#46
961         vmov            d29,d17
962         vsli.64         d26,d17,#23
963 #if 11<16 && defined(__ARMEL__)
964         vrev64.8        d11,d11
965 #endif
966         veor            d25,d24
967         vbsl            d29,d18,d19             @ Ch(e,f,g)
968         vshr.u64        d24,d21,#28
969         veor            d26,d25                 @ Sigma1(e)
970         vadd.i64        d27,d29,d20
971         vshr.u64        d25,d21,#34
972         vsli.64         d24,d21,#36
973         vadd.i64        d27,d26
974         vshr.u64        d26,d21,#39
975         vadd.i64        d28,d11
976         vsli.64         d25,d21,#30
977         veor            d30,d21,d22
978         vsli.64         d26,d21,#25
979         veor            d20,d24,d25
980         vadd.i64        d27,d28
981         vbsl            d30,d23,d22             @ Maj(a,b,c)
982         veor            d20,d26                 @ Sigma0(a)
983         vadd.i64        d16,d27
984         vadd.i64        d30,d27
985         @ vadd.i64      d20,d30
986         vshr.u64        d24,d16,#14     @ 12
987 #if 12<16
988         vld1.64         {d12},[r1]!     @ handles unaligned
989 #endif
990         vshr.u64        d25,d16,#18
991 #if 12>0
992          vadd.i64       d20,d30                 @ h+=Maj from the past
993 #endif
994         vshr.u64        d26,d16,#41
995         vld1.64         {d28},[r3,:64]! @ K[i++]
996         vsli.64         d24,d16,#50
997         vsli.64         d25,d16,#46
998         vmov            d29,d16
999         vsli.64         d26,d16,#23
1000 #if 12<16 && defined(__ARMEL__)
1001         vrev64.8        d12,d12
1002 #endif
1003         veor            d25,d24
1004         vbsl            d29,d17,d18             @ Ch(e,f,g)
1005         vshr.u64        d24,d20,#28
1006         veor            d26,d25                 @ Sigma1(e)
1007         vadd.i64        d27,d29,d19
1008         vshr.u64        d25,d20,#34
1009         vsli.64         d24,d20,#36
1010         vadd.i64        d27,d26
1011         vshr.u64        d26,d20,#39
1012         vadd.i64        d28,d12
1013         vsli.64         d25,d20,#30
1014         veor            d30,d20,d21
1015         vsli.64         d26,d20,#25
1016         veor            d19,d24,d25
1017         vadd.i64        d27,d28
1018         vbsl            d30,d22,d21             @ Maj(a,b,c)
1019         veor            d19,d26                 @ Sigma0(a)
1020         vadd.i64        d23,d27
1021         vadd.i64        d30,d27
1022         @ vadd.i64      d19,d30
1023         vshr.u64        d24,d23,#14     @ 13
1024 #if 13<16
1025         vld1.64         {d13},[r1]!     @ handles unaligned
1026 #endif
1027         vshr.u64        d25,d23,#18
1028 #if 13>0
1029          vadd.i64       d19,d30                 @ h+=Maj from the past
1030 #endif
1031         vshr.u64        d26,d23,#41
1032         vld1.64         {d28},[r3,:64]! @ K[i++]
1033         vsli.64         d24,d23,#50
1034         vsli.64         d25,d23,#46
1035         vmov            d29,d23
1036         vsli.64         d26,d23,#23
1037 #if 13<16 && defined(__ARMEL__)
1038         vrev64.8        d13,d13
1039 #endif
1040         veor            d25,d24
1041         vbsl            d29,d16,d17             @ Ch(e,f,g)
1042         vshr.u64        d24,d19,#28
1043         veor            d26,d25                 @ Sigma1(e)
1044         vadd.i64        d27,d29,d18
1045         vshr.u64        d25,d19,#34
1046         vsli.64         d24,d19,#36
1047         vadd.i64        d27,d26
1048         vshr.u64        d26,d19,#39
1049         vadd.i64        d28,d13
1050         vsli.64         d25,d19,#30
1051         veor            d30,d19,d20
1052         vsli.64         d26,d19,#25
1053         veor            d18,d24,d25
1054         vadd.i64        d27,d28
1055         vbsl            d30,d21,d20             @ Maj(a,b,c)
1056         veor            d18,d26                 @ Sigma0(a)
1057         vadd.i64        d22,d27
1058         vadd.i64        d30,d27
1059         @ vadd.i64      d18,d30
1060         vshr.u64        d24,d22,#14     @ 14
1061 #if 14<16
1062         vld1.64         {d14},[r1]!     @ handles unaligned
1063 #endif
1064         vshr.u64        d25,d22,#18
1065 #if 14>0
1066          vadd.i64       d18,d30                 @ h+=Maj from the past
1067 #endif
1068         vshr.u64        d26,d22,#41
1069         vld1.64         {d28},[r3,:64]! @ K[i++]
1070         vsli.64         d24,d22,#50
1071         vsli.64         d25,d22,#46
1072         vmov            d29,d22
1073         vsli.64         d26,d22,#23
1074 #if 14<16 && defined(__ARMEL__)
1075         vrev64.8        d14,d14
1076 #endif
1077         veor            d25,d24
1078         vbsl            d29,d23,d16             @ Ch(e,f,g)
1079         vshr.u64        d24,d18,#28
1080         veor            d26,d25                 @ Sigma1(e)
1081         vadd.i64        d27,d29,d17
1082         vshr.u64        d25,d18,#34
1083         vsli.64         d24,d18,#36
1084         vadd.i64        d27,d26
1085         vshr.u64        d26,d18,#39
1086         vadd.i64        d28,d14
1087         vsli.64         d25,d18,#30
1088         veor            d30,d18,d19
1089         vsli.64         d26,d18,#25
1090         veor            d17,d24,d25
1091         vadd.i64        d27,d28
1092         vbsl            d30,d20,d19             @ Maj(a,b,c)
1093         veor            d17,d26                 @ Sigma0(a)
1094         vadd.i64        d21,d27
1095         vadd.i64        d30,d27
1096         @ vadd.i64      d17,d30
1097         vshr.u64        d24,d21,#14     @ 15
1098 #if 15<16
1099         vld1.64         {d15},[r1]!     @ handles unaligned
1100 #endif
1101         vshr.u64        d25,d21,#18
1102 #if 15>0
1103          vadd.i64       d17,d30                 @ h+=Maj from the past
1104 #endif
1105         vshr.u64        d26,d21,#41
1106         vld1.64         {d28},[r3,:64]! @ K[i++]
1107         vsli.64         d24,d21,#50
1108         vsli.64         d25,d21,#46
1109         vmov            d29,d21
1110         vsli.64         d26,d21,#23
1111 #if 15<16 && defined(__ARMEL__)
1112         vrev64.8        d15,d15
1113 #endif
1114         veor            d25,d24
1115         vbsl            d29,d22,d23             @ Ch(e,f,g)
1116         vshr.u64        d24,d17,#28
1117         veor            d26,d25                 @ Sigma1(e)
1118         vadd.i64        d27,d29,d16
1119         vshr.u64        d25,d17,#34
1120         vsli.64         d24,d17,#36
1121         vadd.i64        d27,d26
1122         vshr.u64        d26,d17,#39
1123         vadd.i64        d28,d15
1124         vsli.64         d25,d17,#30
1125         veor            d30,d17,d18
1126         vsli.64         d26,d17,#25
1127         veor            d16,d24,d25
1128         vadd.i64        d27,d28
1129         vbsl            d30,d19,d18             @ Maj(a,b,c)
1130         veor            d16,d26                 @ Sigma0(a)
1131         vadd.i64        d20,d27
1132         vadd.i64        d30,d27
1133         @ vadd.i64      d16,d30
1134         mov             r12,#4
1135 .L16_79_neon:
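@ Note: the remaining 64 rounds run as 4 passes of 16 (counted in r12);
@ the message schedule lives in q0-q7 and is advanced two entries at a
@ time, with vext.8 supplying the X[i+1] and X[i+9] inputs and vshr/vsli
@ pairs forming sigma0/sigma1, interleaved with the round computation on
@ the state in d16-d23.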
1136         subs            r12,#1
1137         vshr.u64        q12,q7,#19
1138         vshr.u64        q13,q7,#61
1139          vadd.i64       d16,d30                 @ h+=Maj from the past
1140         vshr.u64        q15,q7,#6
1141         vsli.64         q12,q7,#45
1142         vext.8          q14,q0,q1,#8    @ X[i+1]
1143         vsli.64         q13,q7,#3
1144         veor            q15,q12
1145         vshr.u64        q12,q14,#1
1146         veor            q15,q13                         @ sigma1(X[i+14])
1147         vshr.u64        q13,q14,#8
1148         vadd.i64        q0,q15
1149         vshr.u64        q15,q14,#7
1150         vsli.64         q12,q14,#63
1151         vsli.64         q13,q14,#56
1152         vext.8          q14,q4,q5,#8    @ X[i+9]
1153         veor            q15,q12
1154         vshr.u64        d24,d20,#14             @ from NEON_00_15
1155         vadd.i64        q0,q14
1156         vshr.u64        d25,d20,#18             @ from NEON_00_15
1157         veor            q15,q13                         @ sigma0(X[i+1])
1158         vshr.u64        d26,d20,#41             @ from NEON_00_15
1159         vadd.i64        q0,q15
1160         vld1.64         {d28},[r3,:64]! @ K[i++]
1161         vsli.64         d24,d20,#50
1162         vsli.64         d25,d20,#46
1163         vmov            d29,d20
1164         vsli.64         d26,d20,#23
1165 #if 16<16 && defined(__ARMEL__)
1166         vrev64.8        ,
1167 #endif
1168         veor            d25,d24
1169         vbsl            d29,d21,d22             @ Ch(e,f,g)
1170         vshr.u64        d24,d16,#28
1171         veor            d26,d25                 @ Sigma1(e)
1172         vadd.i64        d27,d29,d23
1173         vshr.u64        d25,d16,#34
1174         vsli.64         d24,d16,#36
1175         vadd.i64        d27,d26
1176         vshr.u64        d26,d16,#39
1177         vadd.i64        d28,d0
1178         vsli.64         d25,d16,#30
1179         veor            d30,d16,d17
1180         vsli.64         d26,d16,#25
1181         veor            d23,d24,d25
1182         vadd.i64        d27,d28
1183         vbsl            d30,d18,d17             @ Maj(a,b,c)
1184         veor            d23,d26                 @ Sigma0(a)
1185         vadd.i64        d19,d27
1186         vadd.i64        d30,d27
1187         @ vadd.i64      d23,d30
1188         vshr.u64        d24,d19,#14     @ 17
1189 #if 17<16
1190         vld1.64         {d1},[r1]!      @ handles unaligned
1191 #endif
1192         vshr.u64        d25,d19,#18
1193 #if 17>0
1194          vadd.i64       d23,d30                 @ h+=Maj from the past
1195 #endif
1196         vshr.u64        d26,d19,#41
1197         vld1.64         {d28},[r3,:64]! @ K[i++]
1198         vsli.64         d24,d19,#50
1199         vsli.64         d25,d19,#46
1200         vmov            d29,d19
1201         vsli.64         d26,d19,#23
1202 #if 17<16 && defined(__ARMEL__)
1203         vrev64.8        ,
1204 #endif
1205         veor            d25,d24
1206         vbsl            d29,d20,d21             @ Ch(e,f,g)
1207         vshr.u64        d24,d23,#28
1208         veor            d26,d25                 @ Sigma1(e)
1209         vadd.i64        d27,d29,d22
1210         vshr.u64        d25,d23,#34
1211         vsli.64         d24,d23,#36
1212         vadd.i64        d27,d26
1213         vshr.u64        d26,d23,#39
1214         vadd.i64        d28,d1
1215         vsli.64         d25,d23,#30
1216         veor            d30,d23,d16
1217         vsli.64         d26,d23,#25
1218         veor            d22,d24,d25
1219         vadd.i64        d27,d28
1220         vbsl            d30,d17,d16             @ Maj(a,b,c)
1221         veor            d22,d26                 @ Sigma0(a)
1222         vadd.i64        d18,d27
1223         vadd.i64        d30,d27
1224         @ vadd.i64      d22,d30
1225         vshr.u64        q12,q0,#19
1226         vshr.u64        q13,q0,#61
1227          vadd.i64       d22,d30                 @ h+=Maj from the past
1228         vshr.u64        q15,q0,#6
1229         vsli.64         q12,q0,#45
1230         vext.8          q14,q1,q2,#8    @ X[i+1]
1231         vsli.64         q13,q0,#3
1232         veor            q15,q12
1233         vshr.u64        q12,q14,#1
1234         veor            q15,q13                         @ sigma1(X[i+14])
1235         vshr.u64        q13,q14,#8
1236         vadd.i64        q1,q15
1237         vshr.u64        q15,q14,#7
1238         vsli.64         q12,q14,#63
1239         vsli.64         q13,q14,#56
1240         vext.8          q14,q5,q6,#8    @ X[i+9]
1241         veor            q15,q12
1242         vshr.u64        d24,d18,#14             @ from NEON_00_15
1243         vadd.i64        q1,q14
1244         vshr.u64        d25,d18,#18             @ from NEON_00_15
1245         veor            q15,q13                         @ sigma0(X[i+1])
1246         vshr.u64        d26,d18,#41             @ from NEON_00_15
1247         vadd.i64        q1,q15
1248         vld1.64         {d28},[r3,:64]! @ K[i++]
1249         vsli.64         d24,d18,#50
1250         vsli.64         d25,d18,#46
1251         vmov            d29,d18
1252         vsli.64         d26,d18,#23
1253 #if 18<16 && defined(__ARMEL__)
1254         vrev64.8        ,
1255 #endif
1256         veor            d25,d24
1257         vbsl            d29,d19,d20             @ Ch(e,f,g)
1258         vshr.u64        d24,d22,#28
1259         veor            d26,d25                 @ Sigma1(e)
1260         vadd.i64        d27,d29,d21
1261         vshr.u64        d25,d22,#34
1262         vsli.64         d24,d22,#36
1263         vadd.i64        d27,d26
1264         vshr.u64        d26,d22,#39
1265         vadd.i64        d28,d2
1266         vsli.64         d25,d22,#30
1267         veor            d30,d22,d23
1268         vsli.64         d26,d22,#25
1269         veor            d21,d24,d25
1270         vadd.i64        d27,d28
1271         vbsl            d30,d16,d23             @ Maj(a,b,c)
1272         veor            d21,d26                 @ Sigma0(a)
1273         vadd.i64        d17,d27
1274         vadd.i64        d30,d27
1275         @ vadd.i64      d21,d30
1276         vshr.u64        d24,d17,#14     @ 19
1277 #if 19<16
1278         vld1.64         {d3},[r1]!      @ handles unaligned
1279 #endif
1280         vshr.u64        d25,d17,#18
1281 #if 19>0
1282          vadd.i64       d21,d30                 @ h+=Maj from the past
1283 #endif
1284         vshr.u64        d26,d17,#41
1285         vld1.64         {d28},[r3,:64]! @ K[i++]
1286         vsli.64         d24,d17,#50
1287         vsli.64         d25,d17,#46
1288         vmov            d29,d17
1289         vsli.64         d26,d17,#23
1290 #if 19<16 && defined(__ARMEL__)
1291         vrev64.8        ,
1292 #endif
1293         veor            d25,d24
1294         vbsl            d29,d18,d19             @ Ch(e,f,g)
1295         vshr.u64        d24,d21,#28
1296         veor            d26,d25                 @ Sigma1(e)
1297         vadd.i64        d27,d29,d20
1298         vshr.u64        d25,d21,#34
1299         vsli.64         d24,d21,#36
1300         vadd.i64        d27,d26
1301         vshr.u64        d26,d21,#39
1302         vadd.i64        d28,d3
1303         vsli.64         d25,d21,#30
1304         veor            d30,d21,d22
1305         vsli.64         d26,d21,#25
1306         veor            d20,d24,d25
1307         vadd.i64        d27,d28
1308         vbsl            d30,d23,d22             @ Maj(a,b,c)
1309         veor            d20,d26                 @ Sigma0(a)
1310         vadd.i64        d16,d27
1311         vadd.i64        d30,d27
1312         @ vadd.i64      d20,d30
1313         vshr.u64        q12,q1,#19
1314         vshr.u64        q13,q1,#61
1315          vadd.i64       d20,d30                 @ h+=Maj from the past
1316         vshr.u64        q15,q1,#6
1317         vsli.64         q12,q1,#45
1318         vext.8          q14,q2,q3,#8    @ X[i+1]
1319         vsli.64         q13,q1,#3
1320         veor            q15,q12
1321         vshr.u64        q12,q14,#1
1322         veor            q15,q13                         @ sigma1(X[i+14])
1323         vshr.u64        q13,q14,#8
1324         vadd.i64        q2,q15
1325         vshr.u64        q15,q14,#7
1326         vsli.64         q12,q14,#63
1327         vsli.64         q13,q14,#56
1328         vext.8          q14,q6,q7,#8    @ X[i+9]
1329         veor            q15,q12
1330         vshr.u64        d24,d16,#14             @ from NEON_00_15
1331         vadd.i64        q2,q14
1332         vshr.u64        d25,d16,#18             @ from NEON_00_15
1333         veor            q15,q13                         @ sigma0(X[i+1])
1334         vshr.u64        d26,d16,#41             @ from NEON_00_15
1335         vadd.i64        q2,q15
1336         vld1.64         {d28},[r3,:64]! @ K[i++]
1337         vsli.64         d24,d16,#50
1338         vsli.64         d25,d16,#46
1339         vmov            d29,d16
1340         vsli.64         d26,d16,#23
1341 #if 20<16 && defined(__ARMEL__)
1342         vrev64.8        ,
1343 #endif
1344         veor            d25,d24
1345         vbsl            d29,d17,d18             @ Ch(e,f,g)
1346         vshr.u64        d24,d20,#28
1347         veor            d26,d25                 @ Sigma1(e)
1348         vadd.i64        d27,d29,d19
1349         vshr.u64        d25,d20,#34
1350         vsli.64         d24,d20,#36
1351         vadd.i64        d27,d26
1352         vshr.u64        d26,d20,#39
1353         vadd.i64        d28,d4
1354         vsli.64         d25,d20,#30
1355         veor            d30,d20,d21
1356         vsli.64         d26,d20,#25
1357         veor            d19,d24,d25
1358         vadd.i64        d27,d28
1359         vbsl            d30,d22,d21             @ Maj(a,b,c)
1360         veor            d19,d26                 @ Sigma0(a)
1361         vadd.i64        d23,d27
1362         vadd.i64        d30,d27
1363         @ vadd.i64      d19,d30
1364         vshr.u64        d24,d23,#14     @ 21
1365 #if 21<16
1366         vld1.64         {d5},[r1]!      @ handles unaligned
1367 #endif
1368         vshr.u64        d25,d23,#18
1369 #if 21>0
1370          vadd.i64       d19,d30                 @ h+=Maj from the past
1371 #endif
1372         vshr.u64        d26,d23,#41
1373         vld1.64         {d28},[r3,:64]! @ K[i++]
1374         vsli.64         d24,d23,#50
1375         vsli.64         d25,d23,#46
1376         vmov            d29,d23
1377         vsli.64         d26,d23,#23
1378 #if 21<16 && defined(__ARMEL__)
1379         vrev64.8        ,
1380 #endif
1381         veor            d25,d24
1382         vbsl            d29,d16,d17             @ Ch(e,f,g)
1383         vshr.u64        d24,d19,#28
1384         veor            d26,d25                 @ Sigma1(e)
1385         vadd.i64        d27,d29,d18
1386         vshr.u64        d25,d19,#34
1387         vsli.64         d24,d19,#36
1388         vadd.i64        d27,d26
1389         vshr.u64        d26,d19,#39
1390         vadd.i64        d28,d5
1391         vsli.64         d25,d19,#30
1392         veor            d30,d19,d20
1393         vsli.64         d26,d19,#25
1394         veor            d18,d24,d25
1395         vadd.i64        d27,d28
1396         vbsl            d30,d21,d20             @ Maj(a,b,c)
1397         veor            d18,d26                 @ Sigma0(a)
1398         vadd.i64        d22,d27
1399         vadd.i64        d30,d27
1400         @ vadd.i64      d18,d30
1401         vshr.u64        q12,q2,#19
1402         vshr.u64        q13,q2,#61
1403          vadd.i64       d18,d30                 @ h+=Maj from the past
1404         vshr.u64        q15,q2,#6
1405         vsli.64         q12,q2,#45
1406         vext.8          q14,q3,q4,#8    @ X[i+1]
1407         vsli.64         q13,q2,#3
1408         veor            q15,q12
1409         vshr.u64        q12,q14,#1
1410         veor            q15,q13                         @ sigma1(X[i+14])
1411         vshr.u64        q13,q14,#8
1412         vadd.i64        q3,q15
1413         vshr.u64        q15,q14,#7
1414         vsli.64         q12,q14,#63
1415         vsli.64         q13,q14,#56
1416         vext.8          q14,q7,q0,#8    @ X[i+9]
1417         veor            q15,q12
1418         vshr.u64        d24,d22,#14             @ from NEON_00_15
1419         vadd.i64        q3,q14
1420         vshr.u64        d25,d22,#18             @ from NEON_00_15
1421         veor            q15,q13                         @ sigma0(X[i+1])
1422         vshr.u64        d26,d22,#41             @ from NEON_00_15
1423         vadd.i64        q3,q15
1424         vld1.64         {d28},[r3,:64]! @ K[i++]
1425         vsli.64         d24,d22,#50
1426         vsli.64         d25,d22,#46
1427         vmov            d29,d22
1428         vsli.64         d26,d22,#23
1429 #if 22<16 && defined(__ARMEL__)
1430         vrev64.8        ,
1431 #endif
1432         veor            d25,d24
1433         vbsl            d29,d23,d16             @ Ch(e,f,g)
1434         vshr.u64        d24,d18,#28
1435         veor            d26,d25                 @ Sigma1(e)
1436         vadd.i64        d27,d29,d17
1437         vshr.u64        d25,d18,#34
1438         vsli.64         d24,d18,#36
1439         vadd.i64        d27,d26
1440         vshr.u64        d26,d18,#39
1441         vadd.i64        d28,d6
1442         vsli.64         d25,d18,#30
1443         veor            d30,d18,d19
1444         vsli.64         d26,d18,#25
1445         veor            d17,d24,d25
1446         vadd.i64        d27,d28
1447         vbsl            d30,d20,d19             @ Maj(a,b,c)
1448         veor            d17,d26                 @ Sigma0(a)
1449         vadd.i64        d21,d27
1450         vadd.i64        d30,d27
1451         @ vadd.i64      d17,d30
1452         vshr.u64        d24,d21,#14     @ 23
1453 #if 23<16
1454         vld1.64         {d7},[r1]!      @ handles unaligned
1455 #endif
1456         vshr.u64        d25,d21,#18
1457 #if 23>0
1458          vadd.i64       d17,d30                 @ h+=Maj from the past
1459 #endif
1460         vshr.u64        d26,d21,#41
1461         vld1.64         {d28},[r3,:64]! @ K[i++]
1462         vsli.64         d24,d21,#50
1463         vsli.64         d25,d21,#46
1464         vmov            d29,d21
1465         vsli.64         d26,d21,#23
1466 #if 23<16 && defined(__ARMEL__)
1467         vrev64.8        d7,d7
1468 #endif
1469         veor            d25,d24
1470         vbsl            d29,d22,d23             @ Ch(e,f,g)
1471         vshr.u64        d24,d17,#28
1472         veor            d26,d25                 @ Sigma1(e)
1473         vadd.i64        d27,d29,d16
1474         vshr.u64        d25,d17,#34
1475         vsli.64         d24,d17,#36
1476         vadd.i64        d27,d26
1477         vshr.u64        d26,d17,#39
1478         vadd.i64        d28,d7
1479         vsli.64         d25,d17,#30
1480         veor            d30,d17,d18
1481         vsli.64         d26,d17,#25
1482         veor            d16,d24,d25
1483         vadd.i64        d27,d28
1484         vbsl            d30,d19,d18             @ Maj(a,b,c)
1485         veor            d16,d26                 @ Sigma0(a)
1486         vadd.i64        d20,d27
1487         vadd.i64        d30,d27
1488         @ vadd.i64      d16,d30
1489         vshr.u64        q12,q3,#19
1490         vshr.u64        q13,q3,#61
1491          vadd.i64       d16,d30                 @ h+=Maj from the past
1492         vshr.u64        q15,q3,#6
1493         vsli.64         q12,q3,#45
1494         vext.8          q14,q4,q5,#8    @ X[i+1]
1495         vsli.64         q13,q3,#3
1496         veor            q15,q12
1497         vshr.u64        q12,q14,#1
1498         veor            q15,q13                         @ sigma1(X[i+14])
1499         vshr.u64        q13,q14,#8
1500         vadd.i64        q4,q15
1501         vshr.u64        q15,q14,#7
1502         vsli.64         q12,q14,#63
1503         vsli.64         q13,q14,#56
1504         vext.8          q14,q0,q1,#8    @ X[i+9]
1505         veor            q15,q12
1506         vshr.u64        d24,d20,#14             @ from NEON_00_15
1507         vadd.i64        q4,q14
1508         vshr.u64        d25,d20,#18             @ from NEON_00_15
1509         veor            q15,q13                         @ sigma0(X[i+1])
1510         vshr.u64        d26,d20,#41             @ from NEON_00_15
1511         vadd.i64        q4,q15
1512         vld1.64         {d28},[r3,:64]! @ K[i++]
1513         vsli.64         d24,d20,#50
1514         vsli.64         d25,d20,#46
1515         vmov            d29,d20
1516         vsli.64         d26,d20,#23
1517 #if 24<16 && defined(__ARMEL__)
1518         vrev64.8        d8,d8
1519 #endif
1520         veor            d25,d24
1521         vbsl            d29,d21,d22             @ Ch(e,f,g)
1522         vshr.u64        d24,d16,#28
1523         veor            d26,d25                 @ Sigma1(e)
1524         vadd.i64        d27,d29,d23
1525         vshr.u64        d25,d16,#34
1526         vsli.64         d24,d16,#36
1527         vadd.i64        d27,d26
1528         vshr.u64        d26,d16,#39
1529         vadd.i64        d28,d8
1530         vsli.64         d25,d16,#30
1531         veor            d30,d16,d17
1532         vsli.64         d26,d16,#25
1533         veor            d23,d24,d25
1534         vadd.i64        d27,d28
1535         vbsl            d30,d18,d17             @ Maj(a,b,c)
1536         veor            d23,d26                 @ Sigma0(a)
1537         vadd.i64        d19,d27
1538         vadd.i64        d30,d27
1539         @ vadd.i64      d23,d30
1540         vshr.u64        d24,d19,#14     @ 25
1541 #if 25<16
1542         vld1.64         {d9},[r1]!      @ handles unaligned
1543 #endif
1544         vshr.u64        d25,d19,#18
1545 #if 25>0
1546          vadd.i64       d23,d30                 @ h+=Maj from the past
1547 #endif
1548         vshr.u64        d26,d19,#41
1549         vld1.64         {d28},[r3,:64]! @ K[i++]
1550         vsli.64         d24,d19,#50
1551         vsli.64         d25,d19,#46
1552         vmov            d29,d19
1553         vsli.64         d26,d19,#23
1554 #if 25<16 && defined(__ARMEL__)
1555         vrev64.8        d9,d9
1556 #endif
1557         veor            d25,d24
1558         vbsl            d29,d20,d21             @ Ch(e,f,g)
1559         vshr.u64        d24,d23,#28
1560         veor            d26,d25                 @ Sigma1(e)
1561         vadd.i64        d27,d29,d22
1562         vshr.u64        d25,d23,#34
1563         vsli.64         d24,d23,#36
1564         vadd.i64        d27,d26
1565         vshr.u64        d26,d23,#39
1566         vadd.i64        d28,d9
1567         vsli.64         d25,d23,#30
1568         veor            d30,d23,d16
1569         vsli.64         d26,d23,#25
1570         veor            d22,d24,d25
1571         vadd.i64        d27,d28
1572         vbsl            d30,d17,d16             @ Maj(a,b,c)
1573         veor            d22,d26                 @ Sigma0(a)
1574         vadd.i64        d18,d27
1575         vadd.i64        d30,d27
1576         @ vadd.i64      d22,d30
1577         vshr.u64        q12,q4,#19
1578         vshr.u64        q13,q4,#61
1579          vadd.i64       d22,d30                 @ h+=Maj from the past
1580         vshr.u64        q15,q4,#6
1581         vsli.64         q12,q4,#45
1582         vext.8          q14,q5,q6,#8    @ X[i+1]
1583         vsli.64         q13,q4,#3
1584         veor            q15,q12
1585         vshr.u64        q12,q14,#1
1586         veor            q15,q13                         @ sigma1(X[i+14])
1587         vshr.u64        q13,q14,#8
1588         vadd.i64        q5,q15
1589         vshr.u64        q15,q14,#7
1590         vsli.64         q12,q14,#63
1591         vsli.64         q13,q14,#56
1592         vext.8          q14,q1,q2,#8    @ X[i+9]
1593         veor            q15,q12
1594         vshr.u64        d24,d18,#14             @ from NEON_00_15
1595         vadd.i64        q5,q14
1596         vshr.u64        d25,d18,#18             @ from NEON_00_15
1597         veor            q15,q13                         @ sigma0(X[i+1])
1598         vshr.u64        d26,d18,#41             @ from NEON_00_15
1599         vadd.i64        q5,q15
1600         vld1.64         {d28},[r3,:64]! @ K[i++]
1601         vsli.64         d24,d18,#50
1602         vsli.64         d25,d18,#46
1603         vmov            d29,d18
1604         vsli.64         d26,d18,#23
1605 #if 26<16 && defined(__ARMEL__)
1606         vrev64.8        d10,d10
1607 #endif
1608         veor            d25,d24
1609         vbsl            d29,d19,d20             @ Ch(e,f,g)
1610         vshr.u64        d24,d22,#28
1611         veor            d26,d25                 @ Sigma1(e)
1612         vadd.i64        d27,d29,d21
1613         vshr.u64        d25,d22,#34
1614         vsli.64         d24,d22,#36
1615         vadd.i64        d27,d26
1616         vshr.u64        d26,d22,#39
1617         vadd.i64        d28,d10
1618         vsli.64         d25,d22,#30
1619         veor            d30,d22,d23
1620         vsli.64         d26,d22,#25
1621         veor            d21,d24,d25
1622         vadd.i64        d27,d28
1623         vbsl            d30,d16,d23             @ Maj(a,b,c)
1624         veor            d21,d26                 @ Sigma0(a)
1625         vadd.i64        d17,d27
1626         vadd.i64        d30,d27
1627         @ vadd.i64      d21,d30
1628         vshr.u64        d24,d17,#14     @ 27
1629 #if 27<16
1630         vld1.64         {d11},[r1]!     @ handles unaligned
1631 #endif
1632         vshr.u64        d25,d17,#18
1633 #if 27>0
1634          vadd.i64       d21,d30                 @ h+=Maj from the past
1635 #endif
1636         vshr.u64        d26,d17,#41
1637         vld1.64         {d28},[r3,:64]! @ K[i++]
1638         vsli.64         d24,d17,#50
1639         vsli.64         d25,d17,#46
1640         vmov            d29,d17
1641         vsli.64         d26,d17,#23
1642 #if 27<16 && defined(__ARMEL__)
1643         vrev64.8        d11,d11
1644 #endif
1645         veor            d25,d24
1646         vbsl            d29,d18,d19             @ Ch(e,f,g)
1647         vshr.u64        d24,d21,#28
1648         veor            d26,d25                 @ Sigma1(e)
1649         vadd.i64        d27,d29,d20
1650         vshr.u64        d25,d21,#34
1651         vsli.64         d24,d21,#36
1652         vadd.i64        d27,d26
1653         vshr.u64        d26,d21,#39
1654         vadd.i64        d28,d11
1655         vsli.64         d25,d21,#30
1656         veor            d30,d21,d22
1657         vsli.64         d26,d21,#25
1658         veor            d20,d24,d25
1659         vadd.i64        d27,d28
1660         vbsl            d30,d23,d22             @ Maj(a,b,c)
1661         veor            d20,d26                 @ Sigma0(a)
1662         vadd.i64        d16,d27
1663         vadd.i64        d30,d27
1664         @ vadd.i64      d20,d30
1665         vshr.u64        q12,q5,#19
1666         vshr.u64        q13,q5,#61
1667          vadd.i64       d20,d30                 @ h+=Maj from the past
1668         vshr.u64        q15,q5,#6
1669         vsli.64         q12,q5,#45
1670         vext.8          q14,q6,q7,#8    @ X[i+1]
1671         vsli.64         q13,q5,#3
1672         veor            q15,q12
1673         vshr.u64        q12,q14,#1
1674         veor            q15,q13                         @ sigma1(X[i+14])
1675         vshr.u64        q13,q14,#8
1676         vadd.i64        q6,q15
1677         vshr.u64        q15,q14,#7
1678         vsli.64         q12,q14,#63
1679         vsli.64         q13,q14,#56
1680         vext.8          q14,q2,q3,#8    @ X[i+9]
1681         veor            q15,q12
1682         vshr.u64        d24,d16,#14             @ from NEON_00_15
1683         vadd.i64        q6,q14
1684         vshr.u64        d25,d16,#18             @ from NEON_00_15
1685         veor            q15,q13                         @ sigma0(X[i+1])
1686         vshr.u64        d26,d16,#41             @ from NEON_00_15
1687         vadd.i64        q6,q15
1688         vld1.64         {d28},[r3,:64]! @ K[i++]
1689         vsli.64         d24,d16,#50
1690         vsli.64         d25,d16,#46
1691         vmov            d29,d16
1692         vsli.64         d26,d16,#23
1693 #if 28<16 && defined(__ARMEL__)
1694         vrev64.8        d12,d12
1695 #endif
1696         veor            d25,d24
1697         vbsl            d29,d17,d18             @ Ch(e,f,g)
1698         vshr.u64        d24,d20,#28
1699         veor            d26,d25                 @ Sigma1(e)
1700         vadd.i64        d27,d29,d19
1701         vshr.u64        d25,d20,#34
1702         vsli.64         d24,d20,#36
1703         vadd.i64        d27,d26
1704         vshr.u64        d26,d20,#39
1705         vadd.i64        d28,d12
1706         vsli.64         d25,d20,#30
1707         veor            d30,d20,d21
1708         vsli.64         d26,d20,#25
1709         veor            d19,d24,d25
1710         vadd.i64        d27,d28
1711         vbsl            d30,d22,d21             @ Maj(a,b,c)
1712         veor            d19,d26                 @ Sigma0(a)
1713         vadd.i64        d23,d27
1714         vadd.i64        d30,d27
1715         @ vadd.i64      d19,d30
1716         vshr.u64        d24,d23,#14     @ 29
1717 #if 29<16
1718         vld1.64         {d13},[r1]!     @ handles unaligned
1719 #endif
1720         vshr.u64        d25,d23,#18
1721 #if 29>0
1722          vadd.i64       d19,d30                 @ h+=Maj from the past
1723 #endif
1724         vshr.u64        d26,d23,#41
1725         vld1.64         {d28},[r3,:64]! @ K[i++]
1726         vsli.64         d24,d23,#50
1727         vsli.64         d25,d23,#46
1728         vmov            d29,d23
1729         vsli.64         d26,d23,#23
1730 #if 29<16 && defined(__ARMEL__)
1731         vrev64.8        d13,d13
1732 #endif
1733         veor            d25,d24
1734         vbsl            d29,d16,d17             @ Ch(e,f,g)
1735         vshr.u64        d24,d19,#28
1736         veor            d26,d25                 @ Sigma1(e)
1737         vadd.i64        d27,d29,d18
1738         vshr.u64        d25,d19,#34
1739         vsli.64         d24,d19,#36
1740         vadd.i64        d27,d26
1741         vshr.u64        d26,d19,#39
1742         vadd.i64        d28,d13
1743         vsli.64         d25,d19,#30
1744         veor            d30,d19,d20
1745         vsli.64         d26,d19,#25
1746         veor            d18,d24,d25
1747         vadd.i64        d27,d28
1748         vbsl            d30,d21,d20             @ Maj(a,b,c)
1749         veor            d18,d26                 @ Sigma0(a)
1750         vadd.i64        d22,d27
1751         vadd.i64        d30,d27
1752         @ vadd.i64      d18,d30
1753         vshr.u64        q12,q6,#19
1754         vshr.u64        q13,q6,#61
1755          vadd.i64       d18,d30                 @ h+=Maj from the past
1756         vshr.u64        q15,q6,#6
1757         vsli.64         q12,q6,#45
1758         vext.8          q14,q7,q0,#8    @ X[i+1]
1759         vsli.64         q13,q6,#3
1760         veor            q15,q12
1761         vshr.u64        q12,q14,#1
1762         veor            q15,q13                         @ sigma1(X[i+14])
1763         vshr.u64        q13,q14,#8
1764         vadd.i64        q7,q15
1765         vshr.u64        q15,q14,#7
1766         vsli.64         q12,q14,#63
1767         vsli.64         q13,q14,#56
1768         vext.8          q14,q3,q4,#8    @ X[i+9]
1769         veor            q15,q12
1770         vshr.u64        d24,d22,#14             @ from NEON_00_15
1771         vadd.i64        q7,q14
1772         vshr.u64        d25,d22,#18             @ from NEON_00_15
1773         veor            q15,q13                         @ sigma0(X[i+1])
1774         vshr.u64        d26,d22,#41             @ from NEON_00_15
1775         vadd.i64        q7,q15
1776         vld1.64         {d28},[r3,:64]! @ K[i++]
1777         vsli.64         d24,d22,#50
1778         vsli.64         d25,d22,#46
1779         vmov            d29,d22
1780         vsli.64         d26,d22,#23
1781 #if 30<16 && defined(__ARMEL__)
1782         vrev64.8        d14,d14
1783 #endif
1784         veor            d25,d24
1785         vbsl            d29,d23,d16             @ Ch(e,f,g)
1786         vshr.u64        d24,d18,#28
1787         veor            d26,d25                 @ Sigma1(e)
1788         vadd.i64        d27,d29,d17
1789         vshr.u64        d25,d18,#34
1790         vsli.64         d24,d18,#36
1791         vadd.i64        d27,d26
1792         vshr.u64        d26,d18,#39
1793         vadd.i64        d28,d14
1794         vsli.64         d25,d18,#30
1795         veor            d30,d18,d19
1796         vsli.64         d26,d18,#25
1797         veor            d17,d24,d25
1798         vadd.i64        d27,d28
1799         vbsl            d30,d20,d19             @ Maj(a,b,c)
1800         veor            d17,d26                 @ Sigma0(a)
1801         vadd.i64        d21,d27
1802         vadd.i64        d30,d27
1803         @ vadd.i64      d17,d30
1804         vshr.u64        d24,d21,#14     @ 31
1805 #if 31<16
1806         vld1.64         {d15},[r1]!     @ handles unaligned
1807 #endif
1808         vshr.u64        d25,d21,#18
1809 #if 31>0
1810          vadd.i64       d17,d30                 @ h+=Maj from the past
1811 #endif
1812         vshr.u64        d26,d21,#41
1813         vld1.64         {d28},[r3,:64]! @ K[i++]
1814         vsli.64         d24,d21,#50
1815         vsli.64         d25,d21,#46
1816         vmov            d29,d21
1817         vsli.64         d26,d21,#23
1818 #if 31<16 && defined(__ARMEL__)
1819         vrev64.8        d15,d15
1820 #endif
1821         veor            d25,d24
1822         vbsl            d29,d22,d23             @ Ch(e,f,g)
1823         vshr.u64        d24,d17,#28
1824         veor            d26,d25                 @ Sigma1(e)
1825         vadd.i64        d27,d29,d16
1826         vshr.u64        d25,d17,#34
1827         vsli.64         d24,d17,#36
1828         vadd.i64        d27,d26
1829         vshr.u64        d26,d17,#39
1830         vadd.i64        d28,d15
1831         vsli.64         d25,d17,#30
1832         veor            d30,d17,d18
1833         vsli.64         d26,d17,#25
1834         veor            d16,d24,d25
1835         vadd.i64        d27,d28
1836         vbsl            d30,d19,d18             @ Maj(a,b,c)
1837         veor            d16,d26                 @ Sigma0(a)
1838         vadd.i64        d20,d27
1839         vadd.i64        d30,d27
1840         @ vadd.i64      d16,d30
1841         bne             .L16_79_neon
1842
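@ End of the 16..79 round loop: fold the working variables back into the
@ hash state.  The context at r0 is loaded into d24-d31, added to d16-d23
@ as four 2x64-bit vector adds, and stored back; r3 is then rewound over
@ the 80 K512 entries (80*8 = 640 bytes) and the outer loop repeats until
@ the input pointer r1 reaches the end pointer r2.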
1843          vadd.i64       d16,d30         @ h+=Maj from the past
1844         vldmia          r0,{d24-d31}    @ load context to temp
1845         vadd.i64        q8,q12          @ vectorized accumulate
1846         vadd.i64        q9,q13
1847         vadd.i64        q10,q14
1848         vadd.i64        q11,q15
1849         vstmia          r0,{d16-d23}    @ save context
1850         teq             r1,r2
1851         sub             r3,#640 @ rewind K512
1852         bne             .Loop_neon
1853
1854         VFP_ABI_POP
1855         bx      lr                              @ .word 0xe12fff1e
1856 .size   sha512_block_data_order_neon,.-sha512_block_data_order_neon
1857 #endif
1858 .asciz  "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
1859 .align  2
1860 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
1861 .comm   OPENSSL_armcap_P,4,4
1862 #endif