1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * SM4-CCM AEAD Algorithm using ARMv8 Crypto Extensions
4 * as specified in rfc8998
5 * https://datatracker.ietf.org/doc/html/rfc8998
7 * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
10 #include <linux/linkage.h>
11 #include <linux/cfi_types.h>
12 #include <asm/assembler.h>
13 #include "sm4-ce-asm.h"
17 .irp b, 0, 1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 24, 25, 26, 27, 28, 29, 30, 31
22 .inst 0xcec08400 | (.L\vn << 5) | .L\vd
31 #define inc_le128(vctr) \
35 rev64 vctr.16b, vctr.16b; \
40 SYM_FUNC_START(sm4_ce_cbcmac_update)
/*
 * CBC-MAC update: fold whole 16-byte input blocks into the running MAC
 * state held in RMAC.
 *
 * NOTE(review): this chunk shows only a subset of the original lines;
 * the SM4 encryptions of RMAC that presumably sit between the XOR steps
 * are in elided lines — confirm against the full source.
 */
42 * x0: round key array, CTX
/*
 * Presumably x1 = mac buffer, x2 = src, w3 = number of 16-byte blocks
 * (x2 is post-incremented as source below; w3 is the loop counter at the
 * cbnz) — TODO confirm against the elided header-comment lines.
 */
/* 4-blocks-per-iteration path: load four input blocks, advance src */
57 ld1 {v0.16b-v3.16b}, [x2], #64
/* CBC-MAC chaining: XOR each input block into the MAC state */
60 eor RMAC.16b, RMAC.16b, v0.16b
62 eor RMAC.16b, RMAC.16b, v1.16b
64 eor RMAC.16b, RMAC.16b, v2.16b
66 eor RMAC.16b, RMAC.16b, v3.16b
/* single-block tail path */
74 ld1 {v0.16b}, [x2], #16
77 eor RMAC.16b, RMAC.16b, v0.16b
/* loop while blocks remain (w3 decrement is in elided lines) */
79 cbnz w3, .Lcbcmac_loop_1x
84 SYM_FUNC_END(sm4_ce_cbcmac_update)
87 SYM_FUNC_START(sm4_ce_ccm_final)
/*
 * Produce the final CCM authentication tag: encrypt the MAC state and
 * ctr0 in one two-block SM4 call, then XOR them (CCM tag =
 * E(K, MAC-state) masked with E(K, ctr0)).
 */
89 * x0: round key array, CTX
90 * x1: ctr0 (big endian, 128 bit)
/* v0 presumably loaded with ctr0 from [x1] in elided lines — TODO confirm */
98 SM4_CRYPT_BLK2(RMAC, v0)
100 /* en-/decrypt the mac with ctr0 */
101 eor RMAC.16b, RMAC.16b, v0.16b
105 SYM_FUNC_END(sm4_ce_ccm_final)
108 SYM_TYPED_FUNC_START(sm4_ce_ccm_enc)
/*
 * CCM encryption: CTR-mode keystream encryption of the payload combined
 * with a CBC-MAC, interleaved by encrypting a counter block and the MAC
 * state together in each two-block SM4_CRYPT_BLK2 call. When encrypting,
 * the MAC absorbs the *plaintext* (see the eor of the input block v0-v3
 * into RMAC below).
 */
110 * x0: round key array, CTX
113 * x3: ctr (big endian, 128 bit)
/*
 * Presumably x1 = dst, x2 = src, w4 = remaining byte count, x5 = mac
 * buffer (per the loads/stores below) — TODO confirm against the elided
 * header-comment lines.
 */
/* fewer than 4 blocks left: fall through to the 1-block path */
127 blt .Lccm_enc_loop_1x
/* 4-blocks-per-iteration path: 64 bytes consumed per pass */
129 sub w4, w4, #(4 * 16)
/*
 * Derive four consecutive counter blocks; inc_le128 increments the
 * 128-bit counter (its rev64-based macro body is partly elided here).
 */
132 inc_le128(v8) /* +0 */
133 inc_le128(v9) /* +1 */
134 inc_le128(v10) /* +2 */
135 inc_le128(v11) /* +3 */
137 ld1 {v0.16b-v3.16b}, [x2], #64
/*
 * Per block: encrypt counter + MAC state together, XOR plaintext into
 * the keystream to form ciphertext, XOR the same plaintext into RMAC.
 */
139 SM4_CRYPT_BLK2(v8, RMAC)
140 eor v8.16b, v8.16b, v0.16b
141 eor RMAC.16b, RMAC.16b, v0.16b
142 SM4_CRYPT_BLK2(v9, RMAC)
143 eor v9.16b, v9.16b, v1.16b
144 eor RMAC.16b, RMAC.16b, v1.16b
145 SM4_CRYPT_BLK2(v10, RMAC)
146 eor v10.16b, v10.16b, v2.16b
147 eor RMAC.16b, RMAC.16b, v2.16b
148 SM4_CRYPT_BLK2(v11, RMAC)
149 eor v11.16b, v11.16b, v3.16b
150 eor RMAC.16b, RMAC.16b, v3.16b
/* store four ciphertext blocks */
152 st1 {v8.16b-v11.16b}, [x1], #64
154 cbz w4, .Lccm_enc_end
/* single-block path (loop label in elided lines) */
166 ld1 {v0.16b}, [x2], #16
168 SM4_CRYPT_BLK2(v8, RMAC)
169 eor v8.16b, v8.16b, v0.16b
170 eor RMAC.16b, RMAC.16b, v0.16b
172 st1 {v8.16b}, [x1], #16
174 cbz w4, .Lccm_enc_end
/*
 * Partial final block (< 16 bytes): encrypt the MAC state and one
 * keystream block once, then handle the tail byte-by-byte.
 */
181 SM4_CRYPT_BLK2(RMAC, v8)
187 ldrb w0, [x2], #1 /* get 1 byte from input */
188 umov w9, v8.b[0] /* get top crypted CTR byte */
189 umov w6, RMAC.b[0] /* get top MAC byte */
191 eor w9, w9, w0 /* w9 = CTR ^ input */
192 eor w6, w6, w0 /* w6 = MAC ^ input (MAC absorbs plaintext) */
194 strb w9, [x1], #1 /* store out byte */
195 strb w6, [x5], #1 /* store MAC byte */
200 /* shift out one byte */
/* rotate both vectors so byte [0] always holds the next byte to use */
201 ext RMAC.16b, RMAC.16b, RMAC.16b, #1
202 ext v8.16b, v8.16b, v8.16b, #1
204 b .Lccm_enc_tail_loop
217 SYM_FUNC_END(sm4_ce_ccm_enc)
220 SYM_TYPED_FUNC_START(sm4_ce_ccm_dec)
/*
 * CCM decryption: CTR-mode keystream decryption combined with a CBC-MAC,
 * interleaved via two-block SM4_CRYPT_BLK2 calls. Mirrors sm4_ce_ccm_enc
 * except the MAC absorbs the *decrypted plaintext* (the eor of v8-v11
 * into RMAC happens after the keystream XOR produces the plaintext).
 */
222 * x0: round key array, CTX
225 * x3: ctr (big endian, 128 bit)
/*
 * Presumably x1 = dst, x2 = src, w4 = remaining byte count, x5 = mac
 * buffer (per the loads/stores below) — TODO confirm against the elided
 * header-comment lines.
 */
/* fewer than 4 blocks left: fall through to the 1-block path */
239 blt .Lccm_dec_loop_1x
/* 4-blocks-per-iteration path: 64 bytes consumed per pass */
241 sub w4, w4, #(4 * 16)
/* derive four consecutive counter blocks */
244 inc_le128(v8) /* +0 */
245 inc_le128(v9) /* +1 */
246 inc_le128(v10) /* +2 */
247 inc_le128(v11) /* +3 */
249 ld1 {v0.16b-v3.16b}, [x2], #64
/*
 * Per block: encrypt counter + MAC state together, XOR ciphertext into
 * the keystream to recover plaintext, then XOR that plaintext into RMAC.
 */
251 SM4_CRYPT_BLK2(v8, RMAC)
252 eor v8.16b, v8.16b, v0.16b
253 eor RMAC.16b, RMAC.16b, v8.16b
254 SM4_CRYPT_BLK2(v9, RMAC)
255 eor v9.16b, v9.16b, v1.16b
256 eor RMAC.16b, RMAC.16b, v9.16b
257 SM4_CRYPT_BLK2(v10, RMAC)
258 eor v10.16b, v10.16b, v2.16b
259 eor RMAC.16b, RMAC.16b, v10.16b
260 SM4_CRYPT_BLK2(v11, RMAC)
261 eor v11.16b, v11.16b, v3.16b
262 eor RMAC.16b, RMAC.16b, v11.16b
/* store four plaintext blocks */
264 st1 {v8.16b-v11.16b}, [x1], #64
266 cbz w4, .Lccm_dec_end
/* single-block path (loop label in elided lines) */
278 ld1 {v0.16b}, [x2], #16
280 SM4_CRYPT_BLK2(v8, RMAC)
281 eor v8.16b, v8.16b, v0.16b
282 eor RMAC.16b, RMAC.16b, v8.16b
284 st1 {v8.16b}, [x1], #16
286 cbz w4, .Lccm_dec_end
/*
 * Partial final block (< 16 bytes): encrypt the MAC state and one
 * keystream block once, then handle the tail byte-by-byte.
 */
293 SM4_CRYPT_BLK2(RMAC, v8)
299 ldrb w0, [x2], #1 /* get 1 byte from input */
300 umov w9, v8.b[0] /* get top crypted CTR byte */
301 umov w6, RMAC.b[0] /* get top MAC byte */
303 eor w9, w9, w0 /* w9 = CTR ^ input */
304 eor w6, w6, w9 /* w6 = MAC ^ output (MAC absorbs plaintext) */
306 strb w9, [x1], #1 /* store out byte */
307 strb w6, [x5], #1 /* store MAC byte */
312 /* shift out one byte */
/* rotate both vectors so byte [0] always holds the next byte to use */
313 ext RMAC.16b, RMAC.16b, RMAC.16b, #1
314 ext v8.16b, v8.16b, v8.16b, #1
316 b .Lccm_dec_tail_loop
329 SYM_FUNC_END(sm4_ce_ccm_dec)