// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SM4 Cipher Algorithm, AES-NI/AVX2 optimized.
 * as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
 * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
 *  https://github.com/mjosaarinen/sm4ni
 */

#include <linux/linkage.h>
#include <asm/frame.h>
/* vector registers */
#define MASK_4BIT	%ymm2
#define MASK_4BITx	%xmm2
/* 4x4 32-bit integer matrix transpose, performed independently within
 * each 128-bit lane. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x1, x0, x0; \
	\
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x2; \
	\
	vpunpckhqdq t1, x0, x1; \
	vpunpcklqdq t1, x0, x0; \
	\
	vpunpckhqdq x2, t2, x3; \
	vpunpcklqdq x2, t2, x2;
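/*
 * Applied to RA0..RA3 (and RB0..RB3) after the per-word byte swap, the
 * transpose leaves each of the four registers holding the same state
 * word (s0..s3) taken from eight different blocks, four per 128-bit
 * lane.  This is the word-sliced layout the ROUND macro below works on.
 */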
/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0; \
	vpandn x, mask4bit, x; \
	vpsrld $4, x, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;
/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
 * the 'vaesenclast' instruction. */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
	vpandn mask4bit, x, tmp0; \
	vpsrld $4, x, x; \
	vpand x, mask4bit, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;
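/*
 * Both transforms implement an 8x8-bit affine map with two 16-entry
 * vpshufb lookups: the byte is split into its low and high nibble using
 * MASK_4BIT (0x0f repeated), each nibble indexes its own table, and the
 * two results are XORed together.
 */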
.section	.rodata.cst16, "aM", @progbits, 16
.align 16

/*
 * The following four affine transform look-up tables are from the work
 * by Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
 *
 * These allow exposing the SM4 S-Box from the AES SubBytes operation.
 */
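/*
 * Both S-boxes are built around inversion in GF(2^8) and differ only by
 * affine transforms and field representation, so the SM4 S-box is
 * computed here as: pre-transform, AES SubBytes (via AESENCLAST, with
 * its ShiftRows step undone by the .Linv_shift_row shuffle below),
 * post-transform.
 */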
/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
	.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
	.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012

/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
	.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
	.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
	.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
	.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06

/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
	.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
	.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09

/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
	.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
	.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
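/*
 * The SM4 linear transform needs rotations by 2, 10, 18 and 24 bits.
 * The byte-granular rotations (8, 16, 24) are folded into the inverse
 * ShiftRows shuffles above; rotating the XOR sum of x, rol(x,8) and
 * rol(x,16) left by 2 then supplies the remaining 2/10/18-bit terms.
 */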
/* For CTR-mode IV byteswap */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For input word byte-swap */
.Lbswap32_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f

/* 12 bytes, only for padding */
.Lpadding_deadbeef:
	.long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
.text
.align 16

SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						plaintext blocks
	 * output:
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						ciphertext blocks
	 */
	FRAME_BEGIN
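	/*
	 * The input words are byte-swapped to SM4's big-endian word order
	 * and transposed into word-sliced form, 32 rounds are applied (the
	 * loop below runs 8 times, four rounds per iteration), and the
	 * final transpose plus .Lbswap128_mask shuffle performs both the
	 * reverse-word output transform and the swap back to byte order.
	 */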
	vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;
	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
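/*
 * One SM4 round for two independent groups of eight blocks (the s* and
 * r* register sets), both using the same round key:
 *   s0 ^= L(SBox(s1 ^ s2 ^ s3 ^ rk))
 * with the linear transform
 *   L(x) = x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24).
 */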
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
	vpbroadcastd (4*(round))(%rdi), RX0; \
	vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4; \
	vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1; \
	vmovdqa RX0, RX1; \
	vpxor s1, RX0, RX0; \
	vpxor s2, RX0, RX0; \
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
	vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2; \
	vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3; \
	vpxor r1, RX1, RX1; \
	vpxor r2, RX1, RX1; \
	vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \
	\
	/* sbox, non-linear part */ \
	transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
	transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
	vextracti128 $1, RX0, RTMP4x; \
	vextracti128 $1, RX1, RTMP0x; \
	vaesenclast MASK_4BITx, RX0x, RX0x; \
	vaesenclast MASK_4BITx, RTMP4x, RTMP4x; \
	vaesenclast MASK_4BITx, RX1x, RX1x; \
	vaesenclast MASK_4BITx, RTMP0x, RTMP0x; \
	vinserti128 $1, RTMP4x, RX0, RX0; \
	vbroadcasti128 .Linv_shift_row rRIP, RTMP4; \
	vinserti128 $1, RTMP0x, RX1, RX1; \
	transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
	transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
	\
	/* linear part */ \
	vpshufb RTMP4, RX0, RTMP0; \
	vpxor RTMP0, s0, s0; /* s0 ^ x */ \
	vpshufb RTMP4, RX1, RTMP2; \
	vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4; \
	vpxor RTMP2, r0, r0; /* r0 ^ x */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
	vpshufb RTMP4, RX1, RTMP3; \
	vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4; \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb RTMP4, RX1, RTMP3; \
	vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4; \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \
	vpshufb RTMP4, RX0, RTMP1; \
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
	vpslld $2, RTMP0, RTMP1; \
	vpsrld $30, RTMP0, RTMP0; \
	vpxor RTMP0, s0, s0; \
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP1, s0, s0; \
	vpshufb RTMP4, RX1, RTMP3; \
	vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \
	vpslld $2, RTMP2, RTMP3; \
	vpsrld $30, RTMP2, RTMP2; \
	vpxor RTMP2, r0, r0; \
	/* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP3, r0, r0;
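/*
 * %rdi walks forward through the 32 32-bit round keys, four per loop
 * iteration below; decryption uses the same code, with a reversed
 * round-key array supplied by the caller.
 */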
	leaq (32*4)(%rdi), %rax;

.Lroundloop_blk8:
	ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
	ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
	ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
	ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk8;
	vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	FRAME_END
	RET;
SYM_FUNC_END(__sm4_crypt_blk16)
#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;
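/*
 * inc_le128 increments a 128-bit counter held in little-endian qword
 * order: 'minus_one' must hold -1 in its low qword and 0 in its high
 * qword, so the first vpsubq adds one to the low qword only, and the
 * compare/shift/subtract sequence propagates a carry into the high
 * qword when the low qword wraps around from all-ones.
 */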
/*
 * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
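/*
 * CTR mode: the 128-bit big-endian counter at *iv is expanded into 16
 * consecutive counter values, those are encrypted with
 * __sm4_crypt_blk16, the keystream is XORed with src into dst, and the
 * counter advanced by 16 is written back to *iv.
 */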
SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv (big endian, 128bit)
	 */
	FRAME_BEGIN

	movq 8(%rcx), %rax;
	bswapq %rax;
	vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
	vpcmpeqd RNOT, RNOT, RNOT;
	vpsrldq $8, RNOT, RNOT;   /* ab: -1:0 ; cd: -1:0 */
	vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */

	/* load IV and byteswap */
	vmovdqu (%rcx), RTMP4x;
	vpshufb RTMP3x, RTMP4x, RTMP4x;
	vmovdqa RTMP4x, RTMP0x;
	inc_le128(RTMP4x, RNOTx, RTMP1x);
	vinserti128 $1, RTMP4x, RTMP0, RTMP0;
	vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */
	/* check need for handling 64-bit overflow and carry */
	cmpq $(0xffffffffffffffff - 16), %rax;
	ja .Lhandle_ctr_carry;
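	/*
	 * Fast path: no 64-bit overflow within the next 16 counter values,
	 * so the IVs can be built by adding 2 per 128-bit lane (vpsubq of
	 * RTMP2, which holds -2 in each lane's low qword), touching only
	 * the low 64 bits of each counter.
	 */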
	vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
	vpshufb RTMP3, RTMP0, RA1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
	vpshufb RTMP3, RTMP0, RA2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
	vpshufb RTMP3, RTMP0, RA3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
	vpshufb RTMP3, RTMP0, RB0;
	vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
	vpshufb RTMP3, RTMP0, RB1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
	vpshufb RTMP3, RTMP0, RB2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
	vpshufb RTMP3, RTMP0, RB3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
	vpshufb RTMP3x, RTMP0x, RTMP0x;

	jmp .Lctr_carry_done;
.Lhandle_ctr_carry:
	/* construct IVs */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
	inc_le128(RTMP0, RNOT, RTMP1);
	vextracti128 $1, RTMP0, RTMP0x;
	vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */
.Lctr_carry_done:
	/* store new IV */
	vmovdqu RTMP0x, (%rcx);

	call __sm4_crypt_blk16;
	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
/*
 * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
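/*
 * CBC decryption: P[i] = D(C[i]) ^ C[i-1].  All 16 ciphertext blocks
 * are decrypted in parallel, then XORed with the IV and with the source
 * read again at a one-block (16 byte) offset; the last ciphertext block
 * becomes the new IV.
 */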
SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN
	vmovdqu (0 * 32)(%rdx), RA0;
	vmovdqu (1 * 32)(%rdx), RA1;
	vmovdqu (2 * 32)(%rdx), RA2;
	vmovdqu (3 * 32)(%rdx), RA3;
	vmovdqu (4 * 32)(%rdx), RB0;
	vmovdqu (5 * 32)(%rdx), RB1;
	vmovdqu (6 * 32)(%rdx), RB2;
	vmovdqu (7 * 32)(%rdx), RB3;

	call __sm4_crypt_blk16;
	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RNOT;
	vpxor RNOT, RA0, RA0;
	vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
	vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
	vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
	vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
	vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
	vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
	vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx); /* store new IV */

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
/*
 * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
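/*
 * CFB decryption: P[i] = E(C[i-1]) ^ C[i].  The IV and the first 15
 * ciphertext blocks are run through the block cipher in the encryption
 * direction and XORed with the ciphertext; the last ciphertext block
 * becomes the new IV.
 */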
SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN
	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RA0;
	vmovdqu (0 * 32 + 16)(%rdx), RA1;
	vmovdqu (1 * 32 + 16)(%rdx), RA2;
	vmovdqu (2 * 32 + 16)(%rdx), RA3;
	vmovdqu (3 * 32 + 16)(%rdx), RB0;
	vmovdqu (4 * 32 + 16)(%rdx), RB1;
	vmovdqu (5 * 32 + 16)(%rdx), RB2;
	vmovdqu (6 * 32 + 16)(%rdx), RB3;

	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx);

	call __sm4_crypt_blk16;
	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)