GNU Linux-libre 5.4.274-gnu1
arch/x86/crypto/aesni-intel_glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */
17
18 #include <linux/hardirq.h>
19 #include <linux/types.h>
20 #include <linux/module.h>
21 #include <linux/err.h>
22 #include <crypto/algapi.h>
23 #include <crypto/aes.h>
24 #include <crypto/ctr.h>
25 #include <crypto/b128ops.h>
26 #include <crypto/gcm.h>
27 #include <crypto/xts.h>
28 #include <asm/cpu_device_id.h>
29 #include <asm/simd.h>
30 #include <crypto/scatterwalk.h>
31 #include <crypto/internal/aead.h>
32 #include <crypto/internal/simd.h>
33 #include <crypto/internal/skcipher.h>
34 #include <linux/workqueue.h>
35 #include <linux/spinlock.h>
36 #ifdef CONFIG_X86_64
37 #include <asm/crypto/glue_helper.h>
38 #endif
39
40
41 #define AESNI_ALIGN     16
42 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
43 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
44 #define RFC4106_HASH_SUBKEY_SIZE 16
45 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
46 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
47 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
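/*
 * Worked example (illustrative; assumes CRYPTO_MINALIGN is 8, as on x86):
 * AESNI_ALIGN_EXTRA = (16 - 1) & ~(8 - 1) = 8, so each *_CTX_SIZE above
 * reserves 8 spare bytes, enough to round an 8-byte-aligned allocation up
 * to a 16-byte boundary.  Similarly AES_BLOCK_MASK = ~15: for a 100-byte
 * request, 100 & AES_BLOCK_MASK = 96 bytes (six full blocks) go to the
 * block routines and 100 & (AES_BLOCK_SIZE - 1) = 4 bytes remain.
 */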
/*
 * This data is stored at the end of the crypto_tfm struct.
 * It is per-"session" data and needs to be 16-byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
        u8 nonce[4];
};

struct generic_gcmaes_ctx {
        u8 hash_subkey[16] AESNI_ALIGN_ATTR;
        struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16
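/*
 * Per-request GCM state shared with the assembly routines below; the
 * layout must stay in sync with what the asm expects.  hash_keys appears
 * to hold precomputed powers of the hash subkey for multi-block GHASH
 * (an assumption based on its 16-block size; see the asm for details).
 */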
struct gcm_context_data {
        /* init, update and finalize context data */
        u8 aad_hash[GCM_BLOCK_LEN];
        u64 aad_length;
        u64 in_length;
        u8 partial_block_enc_key[GCM_BLOCK_LEN];
        u8 orig_IV[GCM_BLOCK_LEN];
        u8 current_counter[GCM_BLOCK_LEN];
        u64 partial_block_len;
        u64 unused;
        u8 hash_keys[GCM_BLOCK_LEN * 16];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
                                  const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
                                  const u8 *in, unsigned int len, u8 *iv);

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES key schedule. Starts on a 16-byte boundary.
 * struct gcm_context_data *gdata, context data. May be uninitialized.
 * u8 *out, ciphertext output. Encrypting in place is allowed.
 * const u8 *in, plaintext input.
 * unsigned long plaintext_len, length of the data to encrypt, in bytes.
 * u8 *iv, pre-counter block j0: the 12-byte IV concatenated with
 *         0x00000001. Must be a 16-byte-aligned pointer.
 * u8 *hash_subkey, the hash subkey input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD).
 * unsigned long aad_len, length of the AAD in bytes.
 * u8 *auth_tag, authentication tag output.
 * unsigned long auth_tag_len, authentication tag length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx,
                        struct gcm_context_data *gdata, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
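/*
 * Illustrative call sequence (a sketch, not driver code; key expansion
 * and buffer setup omitted).  The names pt, ct, aad and tag here are
 * hypothetical caller-provided buffers:
 *
 *	u8 j0[16];				// 12-byte IV || 0x00000001
 *	memcpy(j0, iv, 12);
 *	*(__be32 *)(j0 + 12) = cpu_to_be32(1);
 *	kernel_fpu_begin();
 *	aesni_gcm_enc(aes_ctx, &gdata, ct, pt, pt_len, j0,
 *		      hash_subkey, aad, aad_len, tag, 16);
 *	kernel_fpu_end();
 */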
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES key schedule. Starts on a 16-byte boundary.
 * struct gcm_context_data *gdata, context data. May be uninitialized.
 * u8 *out, plaintext output. Decrypting in place is allowed.
 * const u8 *in, ciphertext input.
 * unsigned long ciphertext_len, length of the data to decrypt, in bytes.
 * u8 *iv, pre-counter block j0: the 12-byte IV concatenated with
 *         0x00000001. Must be a 16-byte-aligned pointer.
 * u8 *hash_subkey, the hash subkey input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD).
 * unsigned long aad_len, length of the AAD in bytes. With RFC4106 this is
 *         going to be 8 or 12 bytes.
 * u8 *auth_tag, authentication tag output.
 * unsigned long auth_tag_len, authentication tag length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx,
                        struct gcm_context_data *gdata, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
/* Scatter / Gather routines, with args similar to above */
asmlinkage void aesni_gcm_init(void *ctx,
                               struct gcm_context_data *gdata,
                               u8 *iv,
                               u8 *hash_subkey, const u8 *aad,
                               unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in,
                                     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
                                   struct gcm_context_data *gdata,
                                   u8 *auth_tag, unsigned long auth_tag_len);
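/*
 * The three-entry-point interface above supports scatter/gather requests:
 * init once, call *_update any number of times on successive chunks, then
 * finalize to produce the tag.  A minimal sketch (chunk bookkeeping
 * omitted; gcmaes_crypt_by_sg() below is the real loop):
 *
 *	kernel_fpu_begin();
 *	aesni_gcm_init(aes_ctx, &gdata, j0, hash_subkey, aad, aad_len);
 *	while (more_chunks)			// hypothetical condition
 *		aesni_gcm_enc_update(aes_ctx, &gdata, dst, src, chunk_len);
 *	aesni_gcm_finalize(aes_ctx, &gdata, tag, tag_len);
 *	kernel_fpu_end();
 */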
static const struct aesni_gcm_tfm_s {
        void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv,
                     u8 *hash_subkey, const u8 *aad, unsigned long aad_len);
        void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
                           const u8 *in, unsigned long plaintext_len);
        void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out,
                           const u8 *in, unsigned long ciphertext_len);
        void (*finalize)(void *ctx, struct gcm_context_data *gdata,
                         u8 *auth_tag, unsigned long auth_tag_len);
} *aesni_gcm_tfm;

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
        .init = &aesni_gcm_init,
        .enc_update = &aesni_gcm_enc_update,
        .dec_update = &aesni_gcm_dec_update,
        .finalize = &aesni_gcm_finalize,
};

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_init_avx_gen2()
 * gcm_data *my_ctx_data, context data.
 * struct gcm_context_data *gdata, context data. May be uninitialized.
 * u8 *iv, pre-counter block j0 (12-byte IV || 0x00000001).
 * u8 *hash_subkey, the hash subkey input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD).
 * unsigned long aad_len, length of the AAD in bytes.
 */
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
                                        struct gcm_context_data *gdata,
                                        u8 *iv,
                                        u8 *hash_subkey,
                                        const u8 *aad,
                                        unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in,
                                     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
                                   struct gcm_context_data *gdata,
                                   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
                                struct gcm_context_data *gdata, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
                                struct gcm_context_data *gdata, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
        .init = &aesni_gcm_init_avx_gen2,
        .enc_update = &aesni_gcm_enc_update_avx_gen2,
        .dec_update = &aesni_gcm_dec_update_avx_gen2,
        .finalize = &aesni_gcm_finalize_avx_gen2,
};

#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_init_avx_gen4()
 * gcm_data *my_ctx_data, context data.
 * struct gcm_context_data *gdata, context data. May be uninitialized.
 * u8 *iv, pre-counter block j0 (12-byte IV || 0x00000001).
 * u8 *hash_subkey, the hash subkey input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD).
 * unsigned long aad_len, length of the AAD in bytes.
 */
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
                                        struct gcm_context_data *gdata,
                                        u8 *iv,
                                        u8 *hash_subkey,
                                        const u8 *aad,
                                        unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
                                     struct gcm_context_data *gdata, u8 *out,
                                     const u8 *in,
                                     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
                                   struct gcm_context_data *gdata,
                                   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
                                struct gcm_context_data *gdata, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
                                struct gcm_context_data *gdata, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
        .init = &aesni_gcm_init_avx_gen4,
        .enc_update = &aesni_gcm_enc_update_avx_gen4,
        .dec_update = &aesni_gcm_dec_update_avx_gen4,
        .finalize = &aesni_gcm_finalize_avx_gen4,
};

#endif

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
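/*
 * Example of the realignment done by aes_ctx() and the *_ctx_get()
 * helpers above: with a raw context that is only 8-byte aligned and
 * AESNI_ALIGN == 16, ALIGN()/PTR_ALIGN() round the pointer up by at most
 * the AESNI_ALIGN_EXTRA slack reserved in the context sizes.  When the
 * crypto API already guarantees at least 16-byte alignment, align is
 * dropped to 1 and the pointer comes back unchanged.
 *
 * aes_set_key_common() below expands the key schedule with the AES-NI asm
 * helper when SIMD is usable, and falls back to the generic C expansion
 * (aes_expandkey) otherwise, e.g. when called from hard IRQ context.
 */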
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!crypto_simd_usable())
                err = aes_expandkey(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!crypto_simd_usable()) {
                aes_encrypt(ctx, dst, src);
        } else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!crypto_simd_usable()) {
                aes_decrypt(ctx, dst, src);
        } else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
{
        return aes_set_key_common(crypto_skcipher_tfm(tfm),
                                  crypto_skcipher_ctx(tfm), key, len);
}

static int ecb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

#ifdef CONFIG_X86_64
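/*
 * Handle the final partial block of a CTR request: encrypt the current
 * counter block to produce one block of keystream, XOR only the remaining
 * nbytes into the output, then advance the counter.  For a 100-byte
 * request, ctr_crypt() below processes 96 bytes and this routine the
 * last 4.
 */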
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct skcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor_cpy(dst, keystream, src, nbytes);

        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * Based on the key length, dispatch to the by8 version of CTR
         * mode encryption/decryption for improved performance.
         * aes_set_key_common() ensures that the key length is one of
         * {128, 192, 256} bits.
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif
static int ctr_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, true);

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                      nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = skcipher_walk_done(&walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = skcipher_walk_done(&walk, 0);
        }
        kernel_fpu_end();

        return err;
}

static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;

        keylen /= 2;

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
                                 key, keylen);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
                                  key + keylen, keylen);
}
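/*
 * Example: a 64-byte xts(aes) key is split into two 32-byte halves and
 * yields two AES-256 schedules; a 32-byte key yields two AES-128
 * schedules.  xts_verify_key() rejects malformed keys up front, and
 * aes_set_key_common() rejects halves that are not 16, 24 or 32 bytes.
 */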
static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
}

static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
}

static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
        aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
}

static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
{
        aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 32,
                .fn_u = { .xts = aesni_xts_enc32 }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = aesni_xts_enc }
        } }
};

static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 32,
                .fn_u = { .xts = aesni_xts_dec32 }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = aesni_xts_dec }
        } }
};
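/*
 * With the tables above, the glue layer calls the 32-block asm routine
 * (32 * 16 = 512 bytes per call) while at least 32 blocks remain, then
 * falls back to the single-block helpers for the tail.
 */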
static int xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx),
                                   false);
}

static int xts_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

        return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx),
                                   true);
}
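/*
 * Derive the GHASH subkey H = AES-K(0^128): expand the key into a
 * temporary schedule on the stack, encrypt an all-zero block in place,
 * and wipe the schedule afterwards.
 */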
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_aes_ctx ctx;
        int ret;

        ret = aes_expandkey(&ctx, key, key_len);
        if (ret)
                return ret;

        /*
         * Clear the hash subkey container: ciphering all zeros produces
         * the hash subkey.
         */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        aes_encrypt(&ctx, hash_subkey, hash_subkey);

        memzero_explicit(&ctx, sizeof(ctx));
        return 0;
}
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

        if (key_len < 4) {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for the 4-byte nonce at the end. */
        key_len -= 4;

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
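/*
 * RFC 4106 key layout example: a 20-byte input is a 16-byte AES key
 * followed by a 4-byte salt (the nonce above), so key_len becomes 16
 * after the subtraction; 28- and 36-byte inputs map to AES-192 and
 * AES-256 the same way.
 */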
/*
 * This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long.
 */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
                                       unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
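/*
 * Core GCM path: linearize the AAD, walk the source/destination
 * scatterlists, and drive the init/update/finalize asm interface under a
 * single FPU section.  On decryption the computed tag is compared against
 * the tag trailing the ciphertext with crypto_memneq() so the check is
 * constant-time.
 */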
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
                              unsigned int assoclen, u8 *hash_subkey,
                              u8 *iv, void *aes_ctx)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
        u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
        struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
        struct scatter_walk dst_sg_walk = {};
        unsigned long left = req->cryptlen;
        unsigned long len, srclen, dstlen;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk src_sg_walk;
        struct scatterlist src_start[2];
        struct scatterlist dst_start[2];
        struct scatterlist *src_sg;
        struct scatterlist *dst_sg;
        u8 *src, *dst, *assoc;
        u8 *assocmem = NULL;
        u8 authTag[16];

        if (!enc)
                left -= auth_tag_len;

#ifdef CONFIG_AS_AVX2
        if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
                gcm_tfm = &aesni_gcm_tfm_avx_gen2;
#endif
#ifdef CONFIG_AS_AVX
        if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
                gcm_tfm = &aesni_gcm_tfm_sse;
#endif

        /* Linearize assoc, if not already linear */
        if (req->src->length >= assoclen && req->src->length &&
                (!PageHighMem(sg_page(req->src)) ||
                        req->src->offset + req->src->length <= PAGE_SIZE)) {
                scatterwalk_start(&assoc_sg_walk, req->src);
                assoc = scatterwalk_map(&assoc_sg_walk);
        } else {
                /* assoc can be any length, so must be on heap */
                assocmem = kmalloc(assoclen, GFP_ATOMIC);
                if (unlikely(!assocmem))
                        return -ENOMEM;
                assoc = assocmem;

                scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
        }

        if (left) {
                src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
                scatterwalk_start(&src_sg_walk, src_sg);
                if (req->src != req->dst) {
                        dst_sg = scatterwalk_ffwd(dst_start, req->dst,
                                                  req->assoclen);
                        scatterwalk_start(&dst_sg_walk, dst_sg);
                }
        }

        kernel_fpu_begin();
        gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
        if (req->src != req->dst) {
                while (left) {
                        src = scatterwalk_map(&src_sg_walk);
                        dst = scatterwalk_map(&dst_sg_walk);
                        srclen = scatterwalk_clamp(&src_sg_walk, left);
                        dstlen = scatterwalk_clamp(&dst_sg_walk, left);
                        len = min(srclen, dstlen);
                        if (len) {
                                if (enc)
                                        gcm_tfm->enc_update(aes_ctx, data,
                                                             dst, src, len);
                                else
                                        gcm_tfm->dec_update(aes_ctx, data,
                                                             dst, src, len);
                        }
                        left -= len;

                        scatterwalk_unmap(src);
                        scatterwalk_unmap(dst);
                        scatterwalk_advance(&src_sg_walk, len);
                        scatterwalk_advance(&dst_sg_walk, len);
                        scatterwalk_done(&src_sg_walk, 0, left);
                        scatterwalk_done(&dst_sg_walk, 1, left);
                }
        } else {
                while (left) {
                        dst = src = scatterwalk_map(&src_sg_walk);
                        len = scatterwalk_clamp(&src_sg_walk, left);
                        if (len) {
                                if (enc)
                                        gcm_tfm->enc_update(aes_ctx, data,
                                                             src, src, len);
                                else
                                        gcm_tfm->dec_update(aes_ctx, data,
                                                             src, src, len);
                        }
                        left -= len;
                        scatterwalk_unmap(src);
                        scatterwalk_advance(&src_sg_walk, len);
                        scatterwalk_done(&src_sg_walk, 1, left);
                }
        }
        gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
        kernel_fpu_end();

        if (!assocmem)
                scatterwalk_unmap(assoc);
        else
                kfree(assocmem);

        if (!enc) {
                u8 authTagMsg[16];

                /* Copy out original authTag */
                scatterwalk_map_and_copy(authTagMsg, req->src,
                                         req->assoclen + req->cryptlen -
                                         auth_tag_len,
                                         auth_tag_len, 0);

                /* Compare generated tag with passed in tag. */
                return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
                        -EBADMSG : 0;
        }

        /* Copy in the authTag */
        scatterwalk_map_and_copy(authTag, req->dst,
                                 req->assoclen + req->cryptlen,
                                 auth_tag_len, 1);

        return 0;
}

static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
                          u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
        return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
                                aes_ctx);
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
                          u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
        return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
                                aes_ctx);
}
static int helper_rfc4106_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
        u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
        unsigned int i;
        __be32 counter = cpu_to_be32(1);

        /*
         * Assuming we support RFC 4106 64-bit extended sequence numbers,
         * the AAD length must be 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Build the IV: 4-byte nonce || 8-byte explicit IV || counter */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
        u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
        unsigned int i;

        /*
         * Assuming we support RFC 4106 64-bit extended sequence numbers,
         * the AAD length must be 16 or 20 bytes.
         */
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
                return -EINVAL;

        /* Build the IV: 4-byte nonce || 8-byte explicit IV || counter */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
                              aes_ctx);
}
#endif
static struct crypto_alg aesni_cipher_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aesni_encrypt,
                        .cia_decrypt            = aesni_decrypt
                }
        }
};

static struct skcipher_alg aesni_skciphers[] = {
        {
                .base = {
                        .cra_name               = "__ecb(aes)",
                        .cra_driver_name        = "__ecb-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = ecb_encrypt,
                .decrypt        = ecb_decrypt,
        }, {
                .base = {
                        .cra_name               = "__cbc(aes)",
                        .cra_driver_name        = "__cbc-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = cbc_encrypt,
                .decrypt        = cbc_decrypt,
#ifdef CONFIG_X86_64
        }, {
                .base = {
                        .cra_name               = "__ctr(aes)",
                        .cra_driver_name        = "__ctr-aes-aesni",
                        .cra_priority           = 400,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = 1,
                        .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .chunksize      = AES_BLOCK_SIZE,
                .setkey         = aesni_skcipher_setkey,
                .encrypt        = ctr_crypt,
                .decrypt        = ctr_crypt,
        }, {
                .base = {
                        .cra_name               = "__xts(aes)",
                        .cra_driver_name        = "__xts-aes-aesni",
                        .cra_priority           = 401,
                        .cra_flags              = CRYPTO_ALG_INTERNAL,
                        .cra_blocksize          = AES_BLOCK_SIZE,
                        .cra_ctxsize            = XTS_AES_CTX_SIZE,
                        .cra_module             = THIS_MODULE,
                },
                .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = xts_aesni_setkey,
                .encrypt        = xts_encrypt,
                .decrypt        = xts_decrypt,
#endif
        }
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
                                  unsigned int key_len)
{
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

        return aes_set_key_common(crypto_aead_tfm(aead),
                                  &ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int generic_gcmaes_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
        u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
        __be32 counter = cpu_to_be32(1);

        memcpy(iv, req->iv, 12);
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
                              aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
        u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);

        memcpy(iv, req->iv, 12);
        *((__be32 *)(iv+12)) = counter;

        return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
                              aes_ctx);
}

static struct aead_alg aesni_aeads[] = { {
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = GCM_RFC4106_IV_SIZE,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__rfc4106(gcm(aes))",
                .cra_driver_name        = "__rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        .setkey                 = generic_gcmaes_set_key,
        .setauthsize            = generic_gcmaes_set_authsize,
        .encrypt                = generic_gcmaes_encrypt,
        .decrypt                = generic_gcmaes_decrypt,
        .ivsize                 = GCM_AES_IV_SIZE,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm(aes)",
                .cra_driver_name        = "__generic-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct generic_gcmaes_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
} };
#else
static struct aead_alg aesni_aeads[0];
#endif

static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_tfm = &aesni_gcm_tfm_sse;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_register_alg(&aesni_cipher_alg);
        if (err)
                return err;

        err = simd_register_skciphers_compat(aesni_skciphers,
                                             ARRAY_SIZE(aesni_skciphers),
                                             aesni_simd_skciphers);
        if (err)
                goto unregister_cipher;

        err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
                                         aesni_simd_aeads);
        if (err)
                goto unregister_skciphers;

        return 0;

unregister_skciphers:
        simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
                                  aesni_simd_skciphers);
unregister_cipher:
        crypto_unregister_alg(&aesni_cipher_alg);
        return err;
}

static void __exit aesni_exit(void)
{
        simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
                              aesni_simd_aeads);
        simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
                                  aesni_simd_skciphers);
        crypto_unregister_alg(&aesni_cipher_alg);
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");