########################################################################
# Implement fast SHA-512 with AVX2 instructions. (x86_64)
# Copyright (C) 2013 Intel Corporation.
# James Guilford <james.guilford@intel.com>
# Kirk Yap <kirk.s.yap@intel.com>
# David Cote <david.m.cote@intel.com>
# Tim Chen <tim.c.chen@linux.intel.com>
# This software is available to you under a choice of one of two
# licenses. You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
########################################################################
# This code is described in an Intel White-Paper:
# "Fast SHA-512 Implementations on Intel Architecture Processors"
# To find it, surf to http://www.intel.com/p/en_US/embedded
# and search for that title.
########################################################################
# This code schedules 1 block at a time, with 4 lanes per block
########################################################################
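# (Each ymm register used for the message schedule holds four consecutive
# 64-bit schedule words W[t..t+3], so one pass of FOUR_ROUNDS_AND_SCHED
# below produces the next four schedule words while consuming four rounds.)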
#include <linux/linkage.h>
BYTE_FLIP_MASK = %ymm9
# 1st arg is %rdi, which is saved to the stack and accessed later via %r12
TBL = %rdi # clobbers CTX1
T1 = %r12 # clobbers CTX2
# Local variables (stack frame)
frame_SRND = frame_XFER + XFER_SIZE
frame_INP = frame_SRND + SRND_SIZE
frame_INPEND = frame_INP + INP_SIZE
frame_CTX = frame_INPEND + INPEND_SIZE
frame_size = frame_CTX + CTX_SIZE
## assume buffers not aligned
#define VMOVDQ vmovdqu
# Add reg to mem using reg-mem add and store
# COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask
# Load ymm with mem and byte swap each qword
.macro COPY_YMM_AND_BSWAP p1 p2 p3
	vpshufb \p3, \p1, \p1
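# For readers more comfortable with intrinsics, the effect of this macro is
# roughly the following (an illustrative C sketch assuming an unaligned
# 32-byte load, matching the VMOVDQ definition above; not part of this file):
#
#   #include <immintrin.h>
#   static inline __m256i copy_ymm_and_bswap(const void *mem, __m256i flip_mask)
#   {
#           __m256i v = _mm256_loadu_si256((const __m256i *)mem);
#           return _mm256_shuffle_epi8(v, flip_mask);       /* vpshufb */
#   }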
# Rotate values of symbols Y0...Y3
# Rotate symbols a..h right
# macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL
# YDST = {YSRC1, YSRC2} >> RVAL*8
.macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL
	vperm2f128 $0x3, \YSRC2, \YSRC1, \YDST # YDST = {YS1_LO, YS2_HI}
	vpalignr $\RVAL, \YSRC2, \YDST, \YDST # YDST = {YS1, YS2} >> RVAL*8
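# An equivalent formulation with AVX2 intrinsics (illustrative only, using
# <immintrin.h> as in the previous sketch; RVAL is fixed at 8, which is how
# the macro is invoked below):
#
#   static inline __m256i my_vpalignr8(__m256i ysrc1, __m256i ysrc2)
#   {
#           /* t = { high: low lane of ysrc1, low: high lane of ysrc2 } */
#           __m256i t = _mm256_permute2f128_si256(ysrc1, ysrc2, 0x03);
#           /* per-lane byte align; net effect: {ysrc1, ysrc2} >> 8 bytes */
#           return _mm256_alignr_epi8(t, ysrc2, 8);
#   }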
.macro FOUR_ROUNDS_AND_SCHED
################################### RND N + 0 #########################################
	MY_VPALIGNR YTMP0, Y_3, Y_2, 8 # YTMP0 = W[-7]
# Calculate w[t-16] + w[t-7]
	vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
	MY_VPALIGNR YTMP1, Y_1, Y_0, 8 # YTMP1 = W[-15]
# Calculate w[t-15] ror 1
	vpsrlq $1, YTMP1, YTMP2
	vpsllq $(64-1), YTMP1, YTMP3
	vpor YTMP2, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1
# Calculate w[t-15] shr 7
	vpsrlq $7, YTMP1, YTMP4 # YTMP4 = W[-15] >> 7
	mov a, y3 # y3 = a # MAJA
	rorx $41, e, y0 # y0 = e >> 41 # S1A
	rorx $18, e, y1 # y1 = e >> 18 # S1B
	add frame_XFER(%rsp), h # h = k + w + h # --
	or c, y3 # y3 = a|c # MAJA
	mov f, y2 # y2 = f # CH
	rorx $34, a, T1 # T1 = a >> 34 # S0B
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
	xor g, y2 # y2 = f^g # CH
	rorx $14, e, y1 # y1 = (e >> 14) # S1
	and e, y2 # y2 = (f^g)&e # CH
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
	rorx $39, a, y1 # y1 = a >> 39 # S0A
	add h, d # d = k + w + h + d # --
	and b, y3 # y3 = (a|c)&b # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
	rorx $28, a, T1 # T1 = (a >> 28) # S0
	xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
	mov a, T1 # T1 = a # MAJB
	and c, T1 # T1 = a&c # MAJB
	add y0, y2 # y2 = S1 + CH # --
	or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
	add y1, h # h = k + w + h + S0 # --
	add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
	add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
	add y3, h # h = t1 + S0 + MAJ # --
################################### RND N + 1 #########################################
# Calculate w[t-15] ror 8
	vpsrlq $8, YTMP1, YTMP2
	vpsllq $(64-8), YTMP1, YTMP1
	vpor YTMP2, YTMP1, YTMP1 # YTMP1 = W[-15] ror 8
# XOR the three components
	vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
	vpxor YTMP1, YTMP3, YTMP1 # YTMP1 = s0
# Add three components, w[t-16], w[t-7] and sigma0
	vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
# Move to appropriate lanes for calculating w[16] and w[17]
	vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
# Move to appropriate lanes for calculating w[18] and w[19]
	vpand MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}
# Calculate w[16] and w[17] in both 128 bit lanes
# Calculate sigma1 for w[16] and w[17] on both 128 bit lanes
	vperm2f128 $0x11, Y_3, Y_3, YTMP2 # YTMP2 = W[-2] {BABA}
	vpsrlq $6, YTMP2, YTMP4 # YTMP4 = W[-2] >> 6 {BABA}
	mov a, y3 # y3 = a # MAJA
	rorx $41, e, y0 # y0 = e >> 41 # S1A
	rorx $18, e, y1 # y1 = e >> 18 # S1B
	add 1*8+frame_XFER(%rsp), h # h = k + w + h # --
	or c, y3 # y3 = a|c # MAJA
	mov f, y2 # y2 = f # CH
	rorx $34, a, T1 # T1 = a >> 34 # S0B
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
	xor g, y2 # y2 = f^g # CH
	rorx $14, e, y1 # y1 = (e >> 14) # S1
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
	rorx $39, a, y1 # y1 = a >> 39 # S0A
	and e, y2 # y2 = (f^g)&e # CH
	add h, d # d = k + w + h + d # --
	and b, y3 # y3 = (a|c)&b # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
	rorx $28, a, T1 # T1 = (a >> 28) # S0
	xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
	mov a, T1 # T1 = a # MAJB
	and c, T1 # T1 = a&c # MAJB
	add y0, y2 # y2 = S1 + CH # --
	or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
	add y1, h # h = k + w + h + S0 # --
	add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
	add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
	add y3, h # h = t1 + S0 + MAJ # --
################################### RND N + 2 #########################################
	vpsrlq $19, YTMP2, YTMP3 # YTMP3 = W[-2] >> 19 {BABA}
	vpsllq $(64-19), YTMP2, YTMP1 # YTMP1 = W[-2] << 19 {BABA}
	vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {BABA}
	vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
	vpsrlq $61, YTMP2, YTMP3 # YTMP3 = W[-2] >> 61 {BABA}
	vpsllq $(64-61), YTMP2, YTMP1 # YTMP1 = W[-2] << 61 {BABA}
	vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {BABA}
	vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^
                                          # (W[-2] ror 61) ^ (W[-2] >> 6) {BABA}
# Add sigma1 to the other components to get w[16] and w[17]
	vpaddq YTMP4, Y_0, Y_0 # Y_0 = {W[1], W[0], W[1], W[0]}
# Calculate sigma1 for w[18] and w[19] for upper 128 bit lane
	vpsrlq $6, Y_0, YTMP4 # YTMP4 = W[-2] >> 6 {DC--}
	mov a, y3 # y3 = a # MAJA
	rorx $41, e, y0 # y0 = e >> 41 # S1A
	add 2*8+frame_XFER(%rsp), h # h = k + w + h # --
	rorx $18, e, y1 # y1 = e >> 18 # S1B
	or c, y3 # y3 = a|c # MAJA
	mov f, y2 # y2 = f # CH
	xor g, y2 # y2 = f^g # CH
	rorx $34, a, T1 # T1 = a >> 34 # S0B
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
	and e, y2 # y2 = (f^g)&e # CH
	rorx $14, e, y1 # y1 = (e >> 14) # S1
	add h, d # d = k + w + h + d # --
	and b, y3 # y3 = (a|c)&b # MAJA
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
	rorx $39, a, y1 # y1 = a >> 39 # S0A
	xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
	rorx $28, a, T1 # T1 = (a >> 28) # S0
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
	mov a, T1 # T1 = a # MAJB
	and c, T1 # T1 = a&c # MAJB
	add y0, y2 # y2 = S1 + CH # --
	or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
	add y1, h # h = k + w + h + S0 # --
	add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
	add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
	add y3, h # h = t1 + S0 + MAJ # --
################################### RND N + 3 #########################################
	vpsrlq $19, Y_0, YTMP3 # YTMP3 = W[-2] >> 19 {DC--}
	vpsllq $(64-19), Y_0, YTMP1 # YTMP1 = W[-2] << 19 {DC--}
	vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 19 {DC--}
	vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
	vpsrlq $61, Y_0, YTMP3 # YTMP3 = W[-2] >> 61 {DC--}
	vpsllq $(64-61), Y_0, YTMP1 # YTMP1 = W[-2] << 61 {DC--}
	vpor YTMP1, YTMP3, YTMP3 # YTMP3 = W[-2] ror 61 {DC--}
	vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = s1 = (W[-2] ror 19) ^
                                          # (W[-2] ror 61) ^ (W[-2] >> 6) {DC--}
# Add the previously computed w[t-16] + w[t-7] + sigma0 for w[18] and w[19]
# to the newly calculated sigma1 to get w[18] and w[19]
	vpaddq YTMP4, YTMP0, YTMP2 # YTMP2 = {W[3], W[2], --, --}
# Form w[19], w[18], w[17], w[16]
	vpblendd $0xF0, YTMP2, Y_0, Y_0 # Y_0 = {W[3], W[2], W[1], W[0]}
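# For reference, the scalar message expansion that these vector steps carry
# out four words at a time (an illustrative C sketch of the standard SHA-512
# schedule; ror64() is a hypothetical helper, not part of this file):
#
#   static inline uint64_t ror64(uint64_t x, int n)
#   {
#           return (x >> n) | (x << (64 - n));
#   }
#
#   /* for t = 16..79 */
#   uint64_t s0 = ror64(w[t-15], 1) ^ ror64(w[t-15], 8) ^ (w[t-15] >> 7);
#   uint64_t s1 = ror64(w[t-2], 19) ^ ror64(w[t-2], 61) ^ (w[t-2] >> 6);
#   w[t] = w[t-16] + s0 + w[t-7] + s1;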
	mov a, y3 # y3 = a # MAJA
	rorx $41, e, y0 # y0 = e >> 41 # S1A
	rorx $18, e, y1 # y1 = e >> 18 # S1B
	add 3*8+frame_XFER(%rsp), h # h = k + w + h # --
	or c, y3 # y3 = a|c # MAJA
	mov f, y2 # y2 = f # CH
	rorx $34, a, T1 # T1 = a >> 34 # S0B
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
	xor g, y2 # y2 = f^g # CH
	rorx $14, e, y1 # y1 = (e >> 14) # S1
	and e, y2 # y2 = (f^g)&e # CH
	add h, d # d = k + w + h + d # --
	and b, y3 # y3 = (a|c)&b # MAJA
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
	xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
	rorx $39, a, y1 # y1 = a >> 39 # S0A
	add y0, y2 # y2 = S1 + CH # --
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
	add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
	rorx $28, a, T1 # T1 = (a >> 28) # S0
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
	mov a, T1 # T1 = a # MAJB
	and c, T1 # T1 = a&c # MAJB
	or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
	add y1, h # h = k + w + h + S0 # --
	add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
	add y3, h # h = t1 + S0 + MAJ # --
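# The scalar equivalent of one round as interleaved above, matching the
# S0/S1/CH/MAJ comments (illustrative C only, reusing the hypothetical
# ror64() helper from the schedule sketch; K[t] and w[t] are the round
# constant and schedule word already pre-added into h via frame_XFER):
#
#   uint64_t S1  = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
#   uint64_t S0  = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
#   uint64_t ch  = ((f ^ g) & e) ^ g;
#   uint64_t maj = ((a | c) & b) | (a & c);
#   uint64_t t1  = h + S1 + ch + K[t] + w[t];
#   d += t1;
#   h  = t1 + S0 + maj;
#
# followed by rotating the eight working variables a..h one position.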
################################### RND N + 0 #########################################
	mov f, y2 # y2 = f # CH
	rorx $41, e, y0 # y0 = e >> 41 # S1A
	rorx $18, e, y1 # y1 = e >> 18 # S1B
	xor g, y2 # y2 = f^g # CH
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
	rorx $14, e, y1 # y1 = (e >> 14) # S1
	and e, y2 # y2 = (f^g)&e # CH
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
	rorx $34, a, T1 # T1 = a >> 34 # S0B
	xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
	rorx $39, a, y1 # y1 = a >> 39 # S0A
	mov a, y3 # y3 = a # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
	rorx $28, a, T1 # T1 = (a >> 28) # S0
	add frame_XFER(%rsp), h # h = k + w + h # --
	or c, y3 # y3 = a|c # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
	mov a, T1 # T1 = a # MAJB
	and b, y3 # y3 = (a|c)&b # MAJA
	and c, T1 # T1 = a&c # MAJB
	add y0, y2 # y2 = S1 + CH # --
	add h, d # d = k + w + h + d # --
	or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
	add y1, h # h = k + w + h + S0 # --
	add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
################################### RND N + 1 #########################################
	add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
	mov f, y2 # y2 = f # CH
	rorx $41, e, y0 # y0 = e >> 41 # S1A
	rorx $18, e, y1 # y1 = e >> 18 # S1B
	xor g, y2 # y2 = f^g # CH
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
	rorx $14, e, y1 # y1 = (e >> 14) # S1
	and e, y2 # y2 = (f^g)&e # CH
	add y3, old_h # h = t1 + S0 + MAJ # --
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
	rorx $34, a, T1 # T1 = a >> 34 # S0B
	xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
	rorx $39, a, y1 # y1 = a >> 39 # S0A
	mov a, y3 # y3 = a # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
	rorx $28, a, T1 # T1 = (a >> 28) # S0
	add 8*1+frame_XFER(%rsp), h # h = k + w + h # --
	or c, y3 # y3 = a|c # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
	mov a, T1 # T1 = a # MAJB
	and b, y3 # y3 = (a|c)&b # MAJA
	and c, T1 # T1 = a&c # MAJB
	add y0, y2 # y2 = S1 + CH # --
	add h, d # d = k + w + h + d # --
	or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
	add y1, h # h = k + w + h + S0 # --
	add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
################################### RND N + 2 #########################################
	add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
	mov f, y2 # y2 = f # CH
	rorx $41, e, y0 # y0 = e >> 41 # S1A
	rorx $18, e, y1 # y1 = e >> 18 # S1B
	xor g, y2 # y2 = f^g # CH
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
	rorx $14, e, y1 # y1 = (e >> 14) # S1
	and e, y2 # y2 = (f^g)&e # CH
	add y3, old_h # h = t1 + S0 + MAJ # --
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
	rorx $34, a, T1 # T1 = a >> 34 # S0B
	xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
	rorx $39, a, y1 # y1 = a >> 39 # S0A
	mov a, y3 # y3 = a # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
	rorx $28, a, T1 # T1 = (a >> 28) # S0
	add 8*2+frame_XFER(%rsp), h # h = k + w + h # --
	or c, y3 # y3 = a|c # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
	mov a, T1 # T1 = a # MAJB
	and b, y3 # y3 = (a|c)&b # MAJA
	and c, T1 # T1 = a&c # MAJB
	add y0, y2 # y2 = S1 + CH # --
	add h, d # d = k + w + h + d # --
	or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
	add y1, h # h = k + w + h + S0 # --
	add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
################################### RND N + 3 #########################################
	add y2, old_h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
	mov f, y2 # y2 = f # CH
	rorx $41, e, y0 # y0 = e >> 41 # S1A
	rorx $18, e, y1 # y1 = e >> 18 # S1B
	xor g, y2 # y2 = f^g # CH
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) # S1
	rorx $14, e, y1 # y1 = (e >> 14) # S1
	and e, y2 # y2 = (f^g)&e # CH
	add y3, old_h # h = t1 + S0 + MAJ # --
	xor y1, y0 # y0 = (e>>41) ^ (e>>18) ^ (e>>14) # S1
	rorx $34, a, T1 # T1 = a >> 34 # S0B
	xor g, y2 # y2 = CH = ((f^g)&e)^g # CH
	rorx $39, a, y1 # y1 = a >> 39 # S0A
	mov a, y3 # y3 = a # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) # S0
	rorx $28, a, T1 # T1 = (a >> 28) # S0
	add 8*3+frame_XFER(%rsp), h # h = k + w + h # --
	or c, y3 # y3 = a|c # MAJA
	xor T1, y1 # y1 = (a>>39) ^ (a>>34) ^ (a>>28) # S0
	mov a, T1 # T1 = a # MAJB
	and b, y3 # y3 = (a|c)&b # MAJA
	and c, T1 # T1 = a&c # MAJB
	add y0, y2 # y2 = S1 + CH # --
	add h, d # d = k + w + h + d # --
	or T1, y3 # y3 = MAJ = ((a|c)&b)|(a&c) # MAJ
	add y1, h # h = k + w + h + S0 # --
	add y2, d # d = k + w + h + d + S1 + CH = d + t1 # --
	add y2, h # h = k + w + h + S0 + S1 + CH = t1 + S0 # --
	add y3, h # h = t1 + S0 + MAJ # --
########################################################################
# void sha512_transform_rorx(sha512_state *state, const u8 *data, int blocks)
# Purpose: Updates the SHA512 digest stored at "state" with the message
# stored at "data".
# The size of the message pointed to by "data" must be an integer multiple
# of SHA512 message blocks.
# "blocks" is the message length in SHA512 blocks
########################################################################
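# A hedged C sketch of the contract this routine implements, assuming the
# digest words live in state->state[] as in the kernel's struct sha512_state
# (sha512_block_ref() is a hypothetical scalar helper, shown only to make
# the per-block loop explicit):
#
#   void sha512_transform_ref(struct sha512_state *state,
#                             const u8 *data, int blocks)
#   {
#           while (blocks--) {
#                   sha512_block_ref(state->state, data); /* one block */
#                   data += 128;                          /* block size */
#           }
#   }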
SYM_FUNC_START(sha512_transform_rorx)
# Allocate Stack Space
	sub $frame_size, %rsp
	and $~(0x20 - 1), %rsp
	shl $7, NUM_BLKS # convert to bytes
	add INP, NUM_BLKS # pointer to end of data
	mov NUM_BLKS, frame_INPEND(%rsp)
## load initial digest
# save %rdi (CTX) before it gets clobbered
	mov %rdi, frame_CTX(%rsp)
	vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK
## byte swap first 16 qwords
	COPY_YMM_AND_BSWAP Y_0, (INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP Y_1, 1*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP Y_2, 2*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP Y_3, 3*32(INP), BYTE_FLIP_MASK
	mov INP, frame_INP(%rsp)
## do the first 64 rounds while scheduling the message: 4 loop iterations of 16 rounds each
	movq $4, frame_SRND(%rsp)
	vpaddq (TBL), Y_0, XFER
	vmovdqa XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED
	vpaddq 1*32(TBL), Y_0, XFER
	vmovdqa XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED
	vpaddq 2*32(TBL), Y_0, XFER
	vmovdqa XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED
	vpaddq 3*32(TBL), Y_0, XFER
	vmovdqa XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED
	subq $1, frame_SRND(%rsp)
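# Final 16 rounds: the message is fully scheduled, so only the round
# computation remains (2 iterations of 8 rounds each).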
	movq $2, frame_SRND(%rsp)
	vpaddq (TBL), Y_0, XFER
	vmovdqa XFER, frame_XFER(%rsp)
	vpaddq 1*32(TBL), Y_1, XFER
	vmovdqa XFER, frame_XFER(%rsp)
	subq $1, frame_SRND(%rsp)
	mov frame_CTX(%rsp), CTX2
	mov frame_INP(%rsp), INP
	cmp frame_INPEND(%rsp), INP
# Restore Stack Pointer
SYM_FUNC_END(sha512_transform_rorx)
########################################################################
# Mergeable 640-byte rodata section. This allows the linker to merge the
# table with an identical 640-byte fragment of another rodata section
# (if such a section exists).
.section .rodata.cst640.K512, "aM", @progbits, 640
# K[t] used in SHA512 hashing
K512:
	.quad 0x428a2f98d728ae22,0x7137449123ef65cd
	.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad 0x3956c25bf348b538,0x59f111f1b605d019
	.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad 0xd807aa98a3030242,0x12835b0145706fbe
	.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad 0x9bdc06a725c71235,0xc19bf174cf692694
	.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad 0x983e5152ee66dfab,0xa831c66d2db43210
	.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad 0x06ca6351e003826f,0x142929670a0e6e70
	.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad 0x81c2c92e47edaee6,0x92722c851482353b
	.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad 0xd192e819d6ef5218,0xd69906245565a910
	.quad 0xf40e35855771202a,0x106aa07032bbd1b8
	.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad 0x90befffa23631e28,0xa4506cebde82bde9
	.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad 0xca273eceea26619c,0xd186b8c721c0c207
	.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad 0x113f9804bef90dae,0x1b710b35131c471b
	.quad 0x28db77f523047d84,0x32caab7b40c72493
	.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
# Mask for byte-swapping the qwords in a YMM register using vpshufb.
PSHUFFLE_BYTE_FLIP_MASK:
	.octa 0x08090a0b0c0d0e0f0001020304050607
	.octa 0x18191a1b1c1d1e1f1011121314151617
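# Within each 128-bit lane the indices reverse every group of eight bytes,
# so vpshufb with this mask turns the big-endian message qwords into native
# little-endian u64 values (the first eight message bytes land in W[0] in
# reversed order).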
.section .rodata.cst32.MASK_YMM_LO, "aM", @progbits, 32
MASK_YMM_LO:
	.octa 0x00000000000000000000000000000000
	.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF