/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM4 Cipher Algorithm, AES-NI/AVX optimized.
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
 * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
 *  https://github.com/mjosaarinen/sm4ni
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/frame.h>
#define rRIP         (%rip)

#define RX0          %xmm0
#define RX1          %xmm1
#define MASK_4BIT    %xmm2
#define RTMP0        %xmm3
#define RTMP1        %xmm4
#define RTMP2        %xmm5
#define RTMP3        %xmm6
#define RTMP4        %xmm7

#define RA0          %xmm8
#define RA1          %xmm9
#define RA2          %xmm10
#define RA3          %xmm11

#define RB0          %xmm12
#define RB1          %xmm13
#define RB2          %xmm14
#define RB3          %xmm15

#define RNOT         %xmm0
#define RBSWAP       %xmm1
/* Transpose four 32-bit words between 128-bit vectors. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x1, x0, x0; \
	\
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x2; \
	\
	vpunpckhqdq t1, x0, x1; \
	vpunpcklqdq t1, x0, x0; \
	\
	vpunpckhqdq x2, t2, x3; \
	vpunpcklqdq x2, t2, x2;
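
/*
 * After transpose_4x4, output register i holds word i of each of the four
 * input blocks (one block per 32-bit lane), so the round function below can
 * process four blocks in parallel with plain lane-wise operations.
 */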
/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0; \
	vpandn x, mask4bit, x; \
	vpsrld $4, x, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;
/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
 * 'vaesenclast' instruction.
 */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
	vpandn mask4bit, x, tmp0; \
	vpsrld $4, x, x; \
	vpand x, mask4bit, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;
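
/*
 * transform_pre/transform_post implement an affine byte transform with two
 * 16-entry vpshufb look-ups: the low and high nibble of every byte each index
 * one table and the two results are XORed.  This maps bytes from the SM4
 * S-box input domain to the AES S-box input domain before vaesenclast, and
 * back afterwards, so the AES hardware S-box can stand in for the SM4 S-box
 * (see the sm4ni write-up referenced above).
 */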
.section	.rodata.cst16, "aM", @progbits, 16
.align 16

/*
 * Following four affine transform look-up tables are from work by
 * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
 *
 * These allow exposing SM4 S-Box from AES SubByte.
 */

/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
	.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
	.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012

/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
	.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
	.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF

/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
	.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
	.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06

/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
	.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
	.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09

/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
	.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
	.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
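
/*
 * vaesenclast performs ShiftRows before SubBytes; shuffling its output with
 * .Linv_shift_row undoes that ShiftRows step, so only the SubBytes result
 * remains.  The _rol_8/16/24 variants fold a left-rotate of each 32-bit word
 * into the same vpshufb, so the rotated copies needed by the SM4 linear
 * transform come for free.
 */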
/* For CTR-mode IV byteswap */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For input word byte-swap */
.Lbswap32_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
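
/*
 * .Lbswap128_mask reverses all 16 bytes (big-endian 128-bit value <-> the
 * CPU's little-endian view, used for the CTR counter); .Lbswap32_mask
 * reverses the bytes within each 32-bit word, converting the big-endian SM4
 * words of the input blocks to native word order and back.
 */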
/* 4-bit mask */
.section	.rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
.align 4
.L0f0f0f0f:
	.long 0x0f0f0f0f

/* 12 bytes, only for padding */
	.long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
.text

/*
 * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
 *                           const u8 *src, int nblocks)
 */
SYM_FUNC_START(sm4_aesni_avx_crypt4)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (1..4 blocks)
	 *	%rdx: src (1..4 blocks)
	 *	%rcx: num blocks (1..4)
	 */
	FRAME_BEGIN
	vmovdqu 0*16(%rdx), RA0;
	vmovdqa RA0, RA1;
	vmovdqa RA0, RA2;
	vmovdqa RA0, RA3;
	cmpq $2, %rcx;
	jb .Lblk4_load_input_done;
	vmovdqu 1*16(%rdx), RA1;
	je .Lblk4_load_input_done;
	vmovdqu 2*16(%rdx), RA2;
	cmpq $3, %rcx;
	je .Lblk4_load_input_done;
	vmovdqu 3*16(%rdx), RA3;

.Lblk4_load_input_done:
	vmovdqa .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;

	vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
	vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;
	vmovdqa .Lpre_tf_hi_s rRIP, RB0;
	vmovdqa .Lpost_tf_lo_s rRIP, RB1;
	vmovdqa .Lpost_tf_hi_s rRIP, RB2;
	vmovdqa .Linv_shift_row rRIP, RB3;
	vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2;
	vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3;
	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
#define ROUND(round, s0, s1, s2, s3) \
	vbroadcastss (4*(round))(%rdi), RX0; \
	vpxor s1, RX0, RX0; \
	vpxor s2, RX0, RX0; \
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
	\
	/* sbox, non-linear part */ \
	transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \
	vaesenclast MASK_4BIT, RX0, RX0; \
	transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \
	\
	/* linear part */ \
	vpshufb RB3, RX0, RTMP0; \
	vpxor RTMP0, s0, s0; /* s0 ^ x */ \
	vpshufb RTMP2, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
	vpshufb RTMP3, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1; \
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
	vpslld $2, RTMP0, RTMP1; \
	vpsrld $30, RTMP0, RTMP0; \
	vpxor RTMP0, s0, s0; \
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP1, s0, s0;
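
/*
 * For reference, one SM4 round on 32-bit words is (a sketch of the scalar
 * equivalent of the macro above, not part of the implementation):
 *
 *	x = sbox(s1 ^ s2 ^ s3 ^ rk[round]);
 *	s0 ^= x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
 *
 * The code accumulates x ^ rol(x,8) ^ rol(x,16) in RTMP0 and rotates that sum
 * left by 2 (vpslld/vpsrld), which yields the rol(x,2)/rol(x,10)/rol(x,18)
 * terms in one go; rol(x,24) is produced directly by the rol_24 shuffle.
 */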
	leaq (32*4)(%rdi), %rax;

.align 16
.Lroundloop_blk4:
	ROUND(0, RA0, RA1, RA2, RA3);
	ROUND(1, RA1, RA2, RA3, RA0);
	ROUND(2, RA2, RA3, RA0, RA1);
	ROUND(3, RA3, RA0, RA1, RA2);

	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk4;

#undef ROUND
	vmovdqa .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;

	vmovdqu RA0, 0*16(%rsi);
	cmpq $2, %rcx;
	jb .Lblk4_store_output_done;
	vmovdqu RA1, 1*16(%rsi);
	je .Lblk4_store_output_done;
	vmovdqu RA2, 2*16(%rsi);
	cmpq $3, %rcx;
	je .Lblk4_store_output_done;
	vmovdqu RA3, 3*16(%rsi);

.Lblk4_store_output_done:
	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_crypt4)
SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
	 *						ciphertext blocks
	 * output:
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
	 *						plaintext blocks
	 */
	FRAME_BEGIN
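
	/*
	 * The eight blocks are handled as two four-block groups (RA* and RB*);
	 * every round below computes both groups back to back, so the two
	 * dependency chains interleave and hide instruction latency.
	 */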
	vmovdqa .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
	vbroadcastss (4*(round))(%rdi), RX0; \
	vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; \
	vmovdqa .Lpre_tf_hi_s rRIP, RTMP1; \
	vmovdqa RX0, RX1; \
	vpxor s1, RX0, RX0; \
	vpxor s2, RX0, RX0; \
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
	vmovdqa .Lpost_tf_lo_s rRIP, RTMP2; \
	vmovdqa .Lpost_tf_hi_s rRIP, RTMP3; \
	vpxor r1, RX1, RX1; \
	vpxor r2, RX1, RX1; \
	vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \
	\
	/* sbox, non-linear part */ \
	transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
	transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
	vmovdqa .Linv_shift_row rRIP, RTMP4; \
	vaesenclast MASK_4BIT, RX0, RX0; \
	vaesenclast MASK_4BIT, RX1, RX1; \
	transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
	transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
	\
	/* linear part */ \
	vpshufb RTMP4, RX0, RTMP0; \
	vpxor RTMP0, s0, s0; /* s0 ^ x */ \
	vpshufb RTMP4, RX1, RTMP2; \
	vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4; \
	vpxor RTMP2, r0, r0; /* r0 ^ x */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
	vpshufb RTMP4, RX1, RTMP3; \
	vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4; \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb RTMP4, RX1, RTMP3; \
	vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4; \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpslld $2, RTMP0, RTMP1; \
	vpsrld $30, RTMP0, RTMP0; \
	vpxor RTMP0, s0, s0; \
	vpxor RTMP1, s0, s0; \
	vpshufb RTMP4, RX1, RTMP3; \
	vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \
	/* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpslld $2, RTMP2, RTMP3; \
	vpsrld $30, RTMP2, RTMP2; \
	vpxor RTMP2, r0, r0; \
	vpxor RTMP3, r0, r0;
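
/*
 * Same round as in sm4_aesni_avx_crypt4 above, computed twice per macro
 * invocation: once for the s0..s3 group (RX0) and once for the r0..r3 group
 * (RX1), with the table loads interleaved between the two streams.
 */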
	leaq (32*4)(%rdi), %rax;

.align 16
.Lroundloop_blk8:
	ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
	ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
	ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
	ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);

	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk8;

#undef ROUND
	vmovdqa .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	FRAME_END
	RET;
SYM_FUNC_END(__sm4_crypt_blk8)
/*
 * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
 *                           const u8 *src, int nblocks)
 */
SYM_FUNC_START(sm4_aesni_avx_crypt8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (1..8 blocks)
	 *	%rdx: src (1..8 blocks)
	 *	%rcx: num blocks (1..8)
	 */
	cmpq $5, %rcx;
	jb sm4_aesni_avx_crypt4;

	FRAME_BEGIN
	vmovdqu (0 * 16)(%rdx), RA0;
	vmovdqu (1 * 16)(%rdx), RA1;
	vmovdqu (2 * 16)(%rdx), RA2;
	vmovdqu (3 * 16)(%rdx), RA3;
	vmovdqu (4 * 16)(%rdx), RB0;
	vmovdqa RB0, RB1;
	vmovdqa RB0, RB2;
	vmovdqa RB0, RB3;
	cmpq $6, %rcx;
	jb .Lblk8_load_input_done;
	vmovdqu (5 * 16)(%rdx), RB1;
	je .Lblk8_load_input_done;
	vmovdqu (6 * 16)(%rdx), RB2;
	cmpq $7, %rcx;
	je .Lblk8_load_input_done;
	vmovdqu (7 * 16)(%rdx), RB3;

.Lblk8_load_input_done:
	call __sm4_crypt_blk8;
	cmpq $6, %rcx;
	vmovdqu RA0, (0 * 16)(%rsi);
	vmovdqu RA1, (1 * 16)(%rsi);
	vmovdqu RA2, (2 * 16)(%rsi);
	vmovdqu RA3, (3 * 16)(%rsi);
	vmovdqu RB0, (4 * 16)(%rsi);
	jb .Lblk8_store_output_done;
	vmovdqu RB1, (5 * 16)(%rsi);
	je .Lblk8_store_output_done;
	vmovdqu RB2, (6 * 16)(%rsi);
	cmpq $7, %rcx;
	je .Lblk8_store_output_done;
	vmovdqu RB3, (7 * 16)(%rsi);

.Lblk8_store_output_done:
	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_crypt8)
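
/*
 * Illustrative only (not part of this file): C callers must wrap these
 * functions in kernel_fpu_begin()/kernel_fpu_end() because they clobber
 * SSE/AVX registers, roughly:
 *
 *	kernel_fpu_begin();
 *	sm4_aesni_avx_crypt8(ctx->rkey_enc, dst, src, nblocks);
 *	kernel_fpu_end();
 *
 * The field name rkey_enc is assumed here for illustration; see the C glue
 * code for the actual context layout.
 */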
/*
 * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
 *                                 const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (8 blocks)
	 *	%rdx: src (8 blocks)
	 *	%rcx: iv (big endian, 128bit)
	 */
	FRAME_BEGIN

	/* load IV and byteswap */
	vmovdqu (%rcx), RA0;

	vmovdqa .Lbswap128_mask rRIP, RBSWAP;
	vpshufb RBSWAP, RA0, RTMP0; /* be => le */

	vpcmpeqd RNOT, RNOT, RNOT;
	vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */
#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;
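
/*
 * inc_le128 adds 1 to a little-endian 128-bit value: vpsubq of minus_one
 * ({-1, 0}) adds 1 to the low qword, vpcmpeqq detects the wrap case (low
 * qword was all ones), and the shifted mask subtracted at the end carries
 * the 1 into the high qword.
 */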
	/* construct IVs */
	inc_le128(RTMP0, RNOT, RTMP2); /* +1 */
	vpshufb RBSWAP, RTMP0, RA1;
	inc_le128(RTMP0, RNOT, RTMP2); /* +2 */
	vpshufb RBSWAP, RTMP0, RA2;
	inc_le128(RTMP0, RNOT, RTMP2); /* +3 */
	vpshufb RBSWAP, RTMP0, RA3;
	inc_le128(RTMP0, RNOT, RTMP2); /* +4 */
	vpshufb RBSWAP, RTMP0, RB0;
	inc_le128(RTMP0, RNOT, RTMP2); /* +5 */
	vpshufb RBSWAP, RTMP0, RB1;
	inc_le128(RTMP0, RNOT, RTMP2); /* +6 */
	vpshufb RBSWAP, RTMP0, RB2;
	inc_le128(RTMP0, RNOT, RTMP2); /* +7 */
	vpshufb RBSWAP, RTMP0, RB3;
	inc_le128(RTMP0, RNOT, RTMP2); /* +8 */
	vpshufb RBSWAP, RTMP0, RTMP1;

	/* store new IV */
	vmovdqu RTMP1, (%rcx);

	call __sm4_crypt_blk8;
	vpxor (0 * 16)(%rdx), RA0, RA0;
	vpxor (1 * 16)(%rdx), RA1, RA1;
	vpxor (2 * 16)(%rdx), RA2, RA2;
	vpxor (3 * 16)(%rdx), RA3, RA3;
	vpxor (4 * 16)(%rdx), RB0, RB0;
	vpxor (5 * 16)(%rdx), RB1, RB1;
	vpxor (6 * 16)(%rdx), RB2, RB2;
	vpxor (7 * 16)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 16)(%rsi);
	vmovdqu RA1, (1 * 16)(%rsi);
	vmovdqu RA2, (2 * 16)(%rsi);
	vmovdqu RA3, (3 * 16)(%rsi);
	vmovdqu RB0, (4 * 16)(%rsi);
	vmovdqu RB1, (5 * 16)(%rsi);
	vmovdqu RB2, (6 * 16)(%rsi);
	vmovdqu RB3, (7 * 16)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
/*
 * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
 *                                 const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (8 blocks)
	 *	%rdx: src (8 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN
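
	/*
	 * CBC decryption: P[i] = D(C[i]) ^ C[i-1], with C[-1] taken from *iv.
	 * All eight ciphertext blocks are decrypted first, then XORed with the
	 * previous ciphertext block; the last ciphertext block is written back
	 * to *iv for the next call.
	 */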
	vmovdqu (0 * 16)(%rdx), RA0;
	vmovdqu (1 * 16)(%rdx), RA1;
	vmovdqu (2 * 16)(%rdx), RA2;
	vmovdqu (3 * 16)(%rdx), RA3;
	vmovdqu (4 * 16)(%rdx), RB0;
	vmovdqu (5 * 16)(%rdx), RB1;
	vmovdqu (6 * 16)(%rdx), RB2;
	vmovdqu (7 * 16)(%rdx), RB3;

	call __sm4_crypt_blk8;
	vmovdqu (7 * 16)(%rdx), RNOT;
	vpxor (%rcx), RA0, RA0;
	vpxor (0 * 16)(%rdx), RA1, RA1;
	vpxor (1 * 16)(%rdx), RA2, RA2;
	vpxor (2 * 16)(%rdx), RA3, RA3;
	vpxor (3 * 16)(%rdx), RB0, RB0;
	vpxor (4 * 16)(%rdx), RB1, RB1;
	vpxor (5 * 16)(%rdx), RB2, RB2;
	vpxor (6 * 16)(%rdx), RB3, RB3;
	vmovdqu RNOT, (%rcx); /* store new IV */

	vmovdqu RA0, (0 * 16)(%rsi);
	vmovdqu RA1, (1 * 16)(%rsi);
	vmovdqu RA2, (2 * 16)(%rsi);
	vmovdqu RA3, (3 * 16)(%rsi);
	vmovdqu RB0, (4 * 16)(%rsi);
	vmovdqu RB1, (5 * 16)(%rsi);
	vmovdqu RB2, (6 * 16)(%rsi);
	vmovdqu RB3, (7 * 16)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
/*
 * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
 *                                 const u8 *src, u8 *iv)
 */
SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (8 blocks)
	 *	%rdx: src (8 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN
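
	/*
	 * CFB decryption: P[i] = E(C[i-1]) ^ C[i], with C[-1] taken from *iv.
	 * The IV and the first seven ciphertext blocks are therefore run
	 * through the block cipher, and the last ciphertext block becomes the
	 * IV for the next call.
	 */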
	/* Load input */
	vmovdqu (%rcx), RA0;
	vmovdqu 0 * 16(%rdx), RA1;
	vmovdqu 1 * 16(%rdx), RA2;
	vmovdqu 2 * 16(%rdx), RA3;
	vmovdqu 3 * 16(%rdx), RB0;
	vmovdqu 4 * 16(%rdx), RB1;
	vmovdqu 5 * 16(%rdx), RB2;
	vmovdqu 6 * 16(%rdx), RB3;

	/* Update IV */
	vmovdqu 7 * 16(%rdx), RNOT;
	vmovdqu RNOT, (%rcx);

	call __sm4_crypt_blk8;
	vpxor (0 * 16)(%rdx), RA0, RA0;
	vpxor (1 * 16)(%rdx), RA1, RA1;
	vpxor (2 * 16)(%rdx), RA2, RA2;
	vpxor (3 * 16)(%rdx), RA3, RA3;
	vpxor (4 * 16)(%rdx), RB0, RB0;
	vpxor (5 * 16)(%rdx), RB1, RB1;
	vpxor (6 * 16)(%rdx), RB2, RB2;
	vpxor (7 * 16)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 16)(%rsi);
	vmovdqu RA1, (1 * 16)(%rsi);
	vmovdqu RA2, (2 * 16)(%rsi);
	vmovdqu RA3, (3 * 16)(%rsi);
	vmovdqu RB0, (4 * 16)(%rsi);
	vmovdqu RB1, (5 * 16)(%rsi);
	vmovdqu RB2, (6 * 16)(%rsi);
	vmovdqu RB3, (7 * 16)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)