1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code, the real AES implementation is in aesni-intel_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <ying.huang@intel.com>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
11  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
12  *             Tadeusz Struk (tadeusz.struk@intel.com)
13  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/module.h>
25 #include <linux/err.h>
26 #include <crypto/algapi.h>
27 #include <crypto/aes.h>
28 #include <crypto/cryptd.h>
29 #include <crypto/ctr.h>
30 #include <crypto/b128ops.h>
31 #include <crypto/gcm.h>
32 #include <crypto/xts.h>
33 #include <asm/cpu_device_id.h>
34 #include <asm/fpu/api.h>
35 #include <asm/crypto/aes.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/internal/aead.h>
38 #include <crypto/internal/simd.h>
39 #include <crypto/internal/skcipher.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46
47 #define AESNI_ALIGN     16
48 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
49 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
50 #define RFC4106_HASH_SUBKEY_SIZE 16
51 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
52 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
53 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
54
55 /* This data is stored at the end of the crypto_tfm struct.
56  * It serves as a per-"session" data storage location and
57  * needs to be 16-byte aligned.
58  */
59 struct aesni_rfc4106_gcm_ctx {
60         u8 hash_subkey[16] AESNI_ALIGN_ATTR;
61         struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
62         u8 nonce[4];
63 };
64
65 struct generic_gcmaes_ctx {
66         u8 hash_subkey[16] AESNI_ALIGN_ATTR;
67         struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
68 };
69
70 struct aesni_xts_ctx {
71         u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
72         u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
73 };
74
75 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
76                              unsigned int key_len);
77 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
78                           const u8 *in);
79 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
80                           const u8 *in);
81 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
82                               const u8 *in, unsigned int len);
83 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
84                               const u8 *in, unsigned int len);
85 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
86                               const u8 *in, unsigned int len, u8 *iv);
87 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
88                               const u8 *in, unsigned int len, u8 *iv);
89
90 int crypto_fpu_init(void);
91 void crypto_fpu_exit(void);
92
93 #define AVX_GEN2_OPTSIZE 640
94 #define AVX_GEN4_OPTSIZE 4096
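
/*
 * Editor's note (derived from the dispatch helpers below): GCM requests
 * shorter than AVX_GEN2_OPTSIZE bytes, or using a key other than AES-128,
 * are handled by the plain AES-NI/SSE routines; lengths in
 * [AVX_GEN2_OPTSIZE, AVX_GEN4_OPTSIZE) use the AVX "gen2" routines; longer
 * requests use the AVX2 "gen4" routines when those are compiled in.
 */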
95
96 #ifdef CONFIG_X86_64
97
98 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
99                               const u8 *in, unsigned int len, u8 *iv);
100 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
101                               const u8 *in, unsigned int len, u8 *iv);
102
103 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
104                                  const u8 *in, bool enc, u8 *iv);
105
106 /* asmlinkage void aesni_gcm_enc()
107  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
108  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
109  * const u8 *in, Plaintext input
110  * unsigned long plaintext_len, Length of data in bytes for encryption.
111  * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
112  *         16-byte aligned pointer.
113  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
114  * const u8 *aad, Additional Authentication Data (AAD)
115  * unsigned long aad_len, Length of AAD in bytes.
116  * u8 *auth_tag, Authenticated Tag output.
117  * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
118  *          Valid values are 16 (most likely), 12 or 8.
119  */
120 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
121                         const u8 *in, unsigned long plaintext_len, u8 *iv,
122                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
123                         u8 *auth_tag, unsigned long auth_tag_len);
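
/*
 * Illustrative sketch (editor's addition, mirroring generic_gcmaes_encrypt()
 * below): the pre-counter block j0 passed in the iv argument is just the
 * 12-byte IV followed by a big-endian 32-bit counter initialised to 1:
 *
 *	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
 *
 *	memcpy(iv, req->iv, 12);
 *	*((__be32 *)(iv + 12)) = cpu_to_be32(1);
 */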
124
125 /* asmlinkage void aesni_gcm_dec()
126  * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
127  * u8 *out, Plaintext output. Decrypt in-place is allowed.
128  * const u8 *in, Ciphertext input
129  * unsigned long ciphertext_len, Length of data in bytes for decryption.
130  * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
131  *         16-byte aligned pointer.
132  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
133  * const u8 *aad, Additional Authentication Data (AAD)
134  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
135  * to be 8 or 12 bytes
136  * u8 *auth_tag, Authenticated Tag output.
137  * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
138  * Valid values are 16 (most likely), 12 or 8.
139  */
140 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
141                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
142                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
143                         u8 *auth_tag, unsigned long auth_tag_len);
144
145
146 #ifdef CONFIG_AS_AVX
147 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
148                 void *keys, u8 *out, unsigned int num_bytes);
149 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
150                 void *keys, u8 *out, unsigned int num_bytes);
151 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
152                 void *keys, u8 *out, unsigned int num_bytes);
153 /*
154  * asmlinkage void aesni_gcm_precomp_avx_gen2()
155  * gcm_data *my_ctx_data, context data
156  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
157  */
158 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
159
160 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
161                         const u8 *in, unsigned long plaintext_len, u8 *iv,
162                         const u8 *aad, unsigned long aad_len,
163                         u8 *auth_tag, unsigned long auth_tag_len);
164
165 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
166                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
167                         const u8 *aad, unsigned long aad_len,
168                         u8 *auth_tag, unsigned long auth_tag_len);
169
170 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
171                         const u8 *in, unsigned long plaintext_len, u8 *iv,
172                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
173                         u8 *auth_tag, unsigned long auth_tag_len)
174 {
175         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
176         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
177                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
178                                 aad_len, auth_tag, auth_tag_len);
179         } else {
180                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
181                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
182                                         aad_len, auth_tag, auth_tag_len);
183         }
184 }
185
186 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
187                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
188                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
189                         u8 *auth_tag, unsigned long auth_tag_len)
190 {
191         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
192         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
193                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
194                                 aad_len, auth_tag, auth_tag_len);
195         } else {
196                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
197                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
198                                         aad_len, auth_tag, auth_tag_len);
199         }
200 }
201 #endif
202
203 #ifdef CONFIG_AS_AVX2
204 /*
205  * asmlinkage void aesni_gcm_precomp_avx_gen4()
206  * gcm_data *my_ctx_data, context data
207  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
208  */
209 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
210
211 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
212                         const u8 *in, unsigned long plaintext_len, u8 *iv,
213                         const u8 *aad, unsigned long aad_len,
214                         u8 *auth_tag, unsigned long auth_tag_len);
215
216 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
217                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
218                         const u8 *aad, unsigned long aad_len,
219                         u8 *auth_tag, unsigned long auth_tag_len);
220
221 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
222                         const u8 *in, unsigned long plaintext_len, u8 *iv,
223                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
224                         u8 *auth_tag, unsigned long auth_tag_len)
225 {
226         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
227         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
228                 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
229                                 aad_len, auth_tag, auth_tag_len);
230         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
231                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
232                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
233                                         aad_len, auth_tag, auth_tag_len);
234         } else {
235                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
236                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
237                                         aad_len, auth_tag, auth_tag_len);
238         }
239 }
240
241 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
242                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
243                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
244                         u8 *auth_tag, unsigned long auth_tag_len)
245 {
246         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
247         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
248                 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
249                                 aad, aad_len, auth_tag, auth_tag_len);
250         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
251                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
252                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
253                                         aad_len, auth_tag, auth_tag_len);
254         } else {
255                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
256                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
257                                         aad_len, auth_tag, auth_tag_len);
258         }
259 }
260 #endif
261
262 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
263                         const u8 *in, unsigned long plaintext_len, u8 *iv,
264                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
265                         u8 *auth_tag, unsigned long auth_tag_len);
266
267 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
268                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
269                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
270                         u8 *auth_tag, unsigned long auth_tag_len);
271
272 static inline struct
273 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
274 {
275         unsigned long align = AESNI_ALIGN;
276
277         if (align <= crypto_tfm_ctx_alignment())
278                 align = 1;
279         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
280 }
281
282 static inline struct
283 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
284 {
285         unsigned long align = AESNI_ALIGN;
286
287         if (align <= crypto_tfm_ctx_alignment())
288                 align = 1;
289         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
290 }
291 #endif
292
293 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
294 {
295         unsigned long addr = (unsigned long)raw_ctx;
296         unsigned long align = AESNI_ALIGN;
297
298         if (align <= crypto_tfm_ctx_alignment())
299                 align = 1;
300         return (struct crypto_aes_ctx *)ALIGN(addr, align);
301 }
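
/*
 * Editor's note: the crypto API only guarantees CRYPTO_MINALIGN alignment
 * for the tfm context, so CRYPTO_AES_CTX_SIZE/XTS_AES_CTX_SIZE reserve
 * AESNI_ALIGN_EXTRA spare bytes and aes_ctx() slides the pointer up to a
 * 16-byte boundary, e.g. (illustrative only):
 *
 *	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 */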
302
303 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
304                               const u8 *in_key, unsigned int key_len)
305 {
306         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
307         u32 *flags = &tfm->crt_flags;
308         int err;
309
310         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
311             key_len != AES_KEYSIZE_256) {
312                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
313                 return -EINVAL;
314         }
315
316         if (!irq_fpu_usable())
317                 err = crypto_aes_expand_key(ctx, in_key, key_len);
318         else {
319                 kernel_fpu_begin();
320                 err = aesni_set_key(ctx, in_key, key_len);
321                 kernel_fpu_end();
322         }
323
324         return err;
325 }
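
/*
 * Editor's note: the pattern above (and in aes_encrypt()/aes_decrypt()
 * below) is the usual AES-NI fallback scheme: if the FPU cannot be used in
 * the current context (irq_fpu_usable() is false), fall back to the generic
 * C implementation, otherwise bracket the AES-NI call with
 * kernel_fpu_begin()/kernel_fpu_end().
 */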
326
327 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
328                        unsigned int key_len)
329 {
330         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
331 }
332
333 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
334 {
335         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
336
337         if (!irq_fpu_usable())
338                 crypto_aes_encrypt_x86(ctx, dst, src);
339         else {
340                 kernel_fpu_begin();
341                 aesni_enc(ctx, dst, src);
342                 kernel_fpu_end();
343         }
344 }
345
346 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
347 {
348         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
349
350         if (!irq_fpu_usable())
351                 crypto_aes_decrypt_x86(ctx, dst, src);
352         else {
353                 kernel_fpu_begin();
354                 aesni_dec(ctx, dst, src);
355                 kernel_fpu_end();
356         }
357 }
358
359 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
360 {
361         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
362
363         aesni_enc(ctx, dst, src);
364 }
365
366 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
367 {
368         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
369
370         aesni_dec(ctx, dst, src);
371 }
372
373 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
374                                  unsigned int len)
375 {
376         return aes_set_key_common(crypto_skcipher_tfm(tfm),
377                                   crypto_skcipher_ctx(tfm), key, len);
378 }
379
380 static int ecb_encrypt(struct skcipher_request *req)
381 {
382         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
383         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
384         struct skcipher_walk walk;
385         unsigned int nbytes;
386         int err;
387
388         err = skcipher_walk_virt(&walk, req, true);
389
390         kernel_fpu_begin();
391         while ((nbytes = walk.nbytes)) {
392                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
393                               nbytes & AES_BLOCK_MASK);
394                 nbytes &= AES_BLOCK_SIZE - 1;
395                 err = skcipher_walk_done(&walk, nbytes);
396         }
397         kernel_fpu_end();
398
399         return err;
400 }
401
402 static int ecb_decrypt(struct skcipher_request *req)
403 {
404         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
405         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
406         struct skcipher_walk walk;
407         unsigned int nbytes;
408         int err;
409
410         err = skcipher_walk_virt(&walk, req, true);
411
412         kernel_fpu_begin();
413         while ((nbytes = walk.nbytes)) {
414                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
415                               nbytes & AES_BLOCK_MASK);
416                 nbytes &= AES_BLOCK_SIZE - 1;
417                 err = skcipher_walk_done(&walk, nbytes);
418         }
419         kernel_fpu_end();
420
421         return err;
422 }
423
424 static int cbc_encrypt(struct skcipher_request *req)
425 {
426         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
427         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
428         struct skcipher_walk walk;
429         unsigned int nbytes;
430         int err;
431
432         err = skcipher_walk_virt(&walk, req, true);
433
434         kernel_fpu_begin();
435         while ((nbytes = walk.nbytes)) {
436                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
437                               nbytes & AES_BLOCK_MASK, walk.iv);
438                 nbytes &= AES_BLOCK_SIZE - 1;
439                 err = skcipher_walk_done(&walk, nbytes);
440         }
441         kernel_fpu_end();
442
443         return err;
444 }
445
446 static int cbc_decrypt(struct skcipher_request *req)
447 {
448         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
449         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
450         struct skcipher_walk walk;
451         unsigned int nbytes;
452         int err;
453
454         err = skcipher_walk_virt(&walk, req, true);
455
456         kernel_fpu_begin();
457         while ((nbytes = walk.nbytes)) {
458                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
459                               nbytes & AES_BLOCK_MASK, walk.iv);
460                 nbytes &= AES_BLOCK_SIZE - 1;
461                 err = skcipher_walk_done(&walk, nbytes);
462         }
463         kernel_fpu_end();
464
465         return err;
466 }
467
468 #ifdef CONFIG_X86_64
469 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
470                             struct skcipher_walk *walk)
471 {
472         u8 *ctrblk = walk->iv;
473         u8 keystream[AES_BLOCK_SIZE];
474         u8 *src = walk->src.virt.addr;
475         u8 *dst = walk->dst.virt.addr;
476         unsigned int nbytes = walk->nbytes;
477
478         aesni_enc(ctx, keystream, ctrblk);
479         crypto_xor_cpy(dst, keystream, src, nbytes);
480
481         crypto_inc(ctrblk, AES_BLOCK_SIZE);
482 }
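
/*
 * Editor's note: ctr_crypt_final() handles the trailing partial block of a
 * CTR request: keystream = AES-ENC(key, counter block), then
 * dst = src XOR keystream for the remaining nbytes, and finally the counter
 * block is incremented.
 */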
483
484 #ifdef CONFIG_AS_AVX
485 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
486                               const u8 *in, unsigned int len, u8 *iv)
487 {
488         /*
489          * Based on the key length, override with the by8 version
490          * of CTR mode encryption/decryption for improved performance.
491          * aes_set_key_common() ensures that the key length is one of
492          * {128, 192, 256}.
493          */
494         if (ctx->key_length == AES_KEYSIZE_128)
495                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
496         else if (ctx->key_length == AES_KEYSIZE_192)
497                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
498         else
499                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
500 }
501 #endif
502
503 static int ctr_crypt(struct skcipher_request *req)
504 {
505         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
506         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
507         struct skcipher_walk walk;
508         unsigned int nbytes;
509         int err;
510
511         err = skcipher_walk_virt(&walk, req, true);
512
513         kernel_fpu_begin();
514         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
515                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
516                                       nbytes & AES_BLOCK_MASK, walk.iv);
517                 nbytes &= AES_BLOCK_SIZE - 1;
518                 err = skcipher_walk_done(&walk, nbytes);
519         }
520         if (walk.nbytes) {
521                 ctr_crypt_final(ctx, &walk);
522                 err = skcipher_walk_done(&walk, 0);
523         }
524         kernel_fpu_end();
525
526         return err;
527 }
528
529 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
530                             unsigned int keylen)
531 {
532         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
533         int err;
534
535         err = xts_verify_key(tfm, key, keylen);
536         if (err)
537                 return err;
538
539         keylen /= 2;
540
541         /* first half of xts-key is for crypt */
542         err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
543                                  key, keylen);
544         if (err)
545                 return err;
546
547         /* second half of xts-key is for tweak */
548         return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
549                                   key + keylen, keylen);
550 }
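
/*
 * Illustrative example (editor's addition): an "xts(aes)" key is the
 * concatenation of the data key and the tweak key, so e.g. a 64-byte key
 * programs AES-256 for both halves:
 *
 *	crypto_skcipher_setkey(tfm, key, 64);
 *		key[0..31]  -> raw_crypt_ctx (data encryption key)
 *		key[32..63] -> raw_tweak_ctx (tweak encryption key)
 */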
551
552
553 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
554 {
555         aesni_enc(ctx, out, in);
556 }
557
558 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
559 {
560         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
561 }
562
563 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
564 {
565         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
566 }
567
568 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
569 {
570         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
571 }
572
573 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
574 {
575         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
576 }
577
578 static const struct common_glue_ctx aesni_enc_xts = {
579         .num_funcs = 2,
580         .fpu_blocks_limit = 1,
581
582         .funcs = { {
583                 .num_blocks = 8,
584                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
585         }, {
586                 .num_blocks = 1,
587                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
588         } }
589 };
590
591 static const struct common_glue_ctx aesni_dec_xts = {
592         .num_funcs = 2,
593         .fpu_blocks_limit = 1,
594
595         .funcs = { {
596                 .num_blocks = 8,
597                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
598         }, {
599                 .num_blocks = 1,
600                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
601         } }
602 };
603
604 static int xts_encrypt(struct skcipher_request *req)
605 {
606         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
607         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
608
609         return glue_xts_req_128bit(&aesni_enc_xts, req,
610                                    XTS_TWEAK_CAST(aesni_xts_tweak),
611                                    aes_ctx(ctx->raw_tweak_ctx),
612                                    aes_ctx(ctx->raw_crypt_ctx));
613 }
614
615 static int xts_decrypt(struct skcipher_request *req)
616 {
617         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
618         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
619
620         return glue_xts_req_128bit(&aesni_dec_xts, req,
621                                    XTS_TWEAK_CAST(aesni_xts_tweak),
622                                    aes_ctx(ctx->raw_tweak_ctx),
623                                    aes_ctx(ctx->raw_crypt_ctx));
624 }
625
626 static int rfc4106_init(struct crypto_aead *aead)
627 {
628         struct cryptd_aead *cryptd_tfm;
629         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
630
631         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
632                                        CRYPTO_ALG_INTERNAL,
633                                        CRYPTO_ALG_INTERNAL);
634         if (IS_ERR(cryptd_tfm))
635                 return PTR_ERR(cryptd_tfm);
636
637         *ctx = cryptd_tfm;
638         crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
639         return 0;
640 }
641
642 static void rfc4106_exit(struct crypto_aead *aead)
643 {
644         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
645
646         cryptd_free_aead(*ctx);
647 }
648
649 static int
650 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
651 {
652         struct crypto_cipher *tfm;
653         int ret;
654
655         tfm = crypto_alloc_cipher("aes", 0, 0);
656         if (IS_ERR(tfm))
657                 return PTR_ERR(tfm);
658
659         ret = crypto_cipher_setkey(tfm, key, key_len);
660         if (ret)
661                 goto out_free_cipher;
662
663         /* Clear the data in the hash sub key container to zero. */
664         /* We want to cipher all zeros to create the hash sub key. */
665         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
666
667         crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
668
669 out_free_cipher:
670         crypto_free_cipher(tfm);
671         return ret;
672 }
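
/*
 * Editor's note: the helper above computes the GHASH hash subkey as defined
 * for GCM, H = AES-ENC(key, 0^128), i.e. it encrypts a single all-zero
 * block with the session key.
 */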
673
674 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
675                                   unsigned int key_len)
676 {
677         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
678
679         if (key_len < 4) {
680                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
681                 return -EINVAL;
682         }
683         /* Account for the 4-byte nonce at the end. */
684         key_len -= 4;
685
686         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
687
688         return aes_set_key_common(crypto_aead_tfm(aead),
689                                   &ctx->aes_key_expanded, key, key_len) ?:
690                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
691 }
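
/*
 * Illustrative key layout (editor's addition, per the setkey above): the
 * rfc4106 key material is the AES key followed by a 4-byte salt, e.g. a
 * 20-byte key programs AES-128:
 *
 *	key[0..15]  -> AES key (expanded into aes_key_expanded)
 *	key[16..19] -> ctx->nonce (salt used as iv[0..3] of each request)
 */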
692
693 static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
694                                   unsigned int key_len)
695 {
696         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
697         struct cryptd_aead *cryptd_tfm = *ctx;
698
699         return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
700 }
701
702 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
703                                        unsigned int authsize)
704 {
705         switch (authsize) {
706         case 8:
707         case 12:
708         case 16:
709                 break;
710         default:
711                 return -EINVAL;
712         }
713
714         return 0;
715 }
716
717 /* This is the Integrity Check Value (aka the authentication tag) length and
718  * can be 8, 12 or 16 bytes long. */
719 static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
720                                        unsigned int authsize)
721 {
722         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
723         struct cryptd_aead *cryptd_tfm = *ctx;
724
725         return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
726 }
727
728 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
729                                        unsigned int authsize)
730 {
731         switch (authsize) {
732         case 4:
733         case 8:
734         case 12:
735         case 13:
736         case 14:
737         case 15:
738         case 16:
739                 break;
740         default:
741                 return -EINVAL;
742         }
743
744         return 0;
745 }
746
747 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
748                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
749 {
750         u8 one_entry_in_sg = 0;
751         u8 *src, *dst, *assoc;
752         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
753         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
754         struct scatter_walk src_sg_walk;
755         struct scatter_walk dst_sg_walk = {};
756
757         if (sg_is_last(req->src) &&
758             (!PageHighMem(sg_page(req->src)) ||
759             req->src->offset + req->src->length <= PAGE_SIZE) &&
760             sg_is_last(req->dst) &&
761             (!PageHighMem(sg_page(req->dst)) ||
762             req->dst->offset + req->dst->length <= PAGE_SIZE)) {
763                 one_entry_in_sg = 1;
764                 scatterwalk_start(&src_sg_walk, req->src);
765                 assoc = scatterwalk_map(&src_sg_walk);
766                 src = assoc + req->assoclen;
767                 dst = src;
768                 if (unlikely(req->src != req->dst)) {
769                         scatterwalk_start(&dst_sg_walk, req->dst);
770                         dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
771                 }
772         } else {
773                 /* Allocate memory for src, dst, assoc */
774                 assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
775                         GFP_ATOMIC);
776                 if (unlikely(!assoc))
777                         return -ENOMEM;
778                 scatterwalk_map_and_copy(assoc, req->src, 0,
779                                          req->assoclen + req->cryptlen, 0);
780                 src = assoc + req->assoclen;
781                 dst = src;
782         }
783
784         kernel_fpu_begin();
785         aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
786                           hash_subkey, assoc, assoclen,
787                           dst + req->cryptlen, auth_tag_len);
788         kernel_fpu_end();
789
790         /* The authTag (aka the Integrity Check Value) needs to be written
791          * back to the packet. */
792         if (one_entry_in_sg) {
793                 if (unlikely(req->src != req->dst)) {
794                         scatterwalk_unmap(dst - req->assoclen);
795                         scatterwalk_advance(&dst_sg_walk, req->dst->length);
796                         scatterwalk_done(&dst_sg_walk, 1, 0);
797                 }
798                 scatterwalk_unmap(assoc);
799                 scatterwalk_advance(&src_sg_walk, req->src->length);
800                 scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
801         } else {
802                 scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
803                                          req->cryptlen + auth_tag_len, 1);
804                 kfree(assoc);
805         }
806         return 0;
807 }
808
809 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
810                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
811 {
812         u8 one_entry_in_sg = 0;
813         u8 *src, *dst, *assoc;
814         unsigned long tempCipherLen = 0;
815         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
816         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
817         u8 authTag[16];
818         struct scatter_walk src_sg_walk;
819         struct scatter_walk dst_sg_walk = {};
820         int retval = 0;
821
822         tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
823
824         if (sg_is_last(req->src) &&
825             (!PageHighMem(sg_page(req->src)) ||
826             req->src->offset + req->src->length <= PAGE_SIZE) &&
827             sg_is_last(req->dst) && req->dst->length &&
828             (!PageHighMem(sg_page(req->dst)) ||
829             req->dst->offset + req->dst->length <= PAGE_SIZE)) {
830                 one_entry_in_sg = 1;
831                 scatterwalk_start(&src_sg_walk, req->src);
832                 assoc = scatterwalk_map(&src_sg_walk);
833                 src = assoc + req->assoclen;
834                 dst = src;
835                 if (unlikely(req->src != req->dst)) {
836                         scatterwalk_start(&dst_sg_walk, req->dst);
837                         dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
838                 }
839         } else {
840                 /* Allocate memory for src, dst, assoc */
841                 assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
842                 if (!assoc)
843                         return -ENOMEM;
844                 scatterwalk_map_and_copy(assoc, req->src, 0,
845                                          req->assoclen + req->cryptlen, 0);
846                 src = assoc + req->assoclen;
847                 dst = src;
848         }
849
850
851         kernel_fpu_begin();
852         aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
853                           hash_subkey, assoc, assoclen,
854                           authTag, auth_tag_len);
855         kernel_fpu_end();
856
857         /* Compare generated tag with passed in tag. */
858         retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
859                 -EBADMSG : 0;
860
861         if (one_entry_in_sg) {
862                 if (unlikely(req->src != req->dst)) {
863                         scatterwalk_unmap(dst - req->assoclen);
864                         scatterwalk_advance(&dst_sg_walk, req->dst->length);
865                         scatterwalk_done(&dst_sg_walk, 1, 0);
866                 }
867                 scatterwalk_unmap(assoc);
868                 scatterwalk_advance(&src_sg_walk, req->src->length);
869                 scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
870         } else {
871                 scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
872                                          tempCipherLen, 1);
873                 kfree(assoc);
874         }
875         return retval;
876
877 }
878
879 static int helper_rfc4106_encrypt(struct aead_request *req)
880 {
881         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
882         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
883         void *aes_ctx = &(ctx->aes_key_expanded);
884         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
885         unsigned int i;
886         __be32 counter = cpu_to_be32(1);
887
888         /* Assuming we are supporting rfc4106 64-bit extended
889          * sequence numbers, we need the AAD length to be equal
890          * to 16 or 20 bytes. */
891         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
892                 return -EINVAL;
893
894         /* Build the IV: nonce | explicit IV | counter */
895         for (i = 0; i < 4; i++)
896                 *(iv+i) = ctx->nonce[i];
897         for (i = 0; i < 8; i++)
898                 *(iv+4+i) = req->iv[i];
899         *((__be32 *)(iv+12)) = counter;
900
901         return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
902                               aes_ctx);
903 }
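
/*
 * Illustrative layout (editor's sketch, matching the loops above): the GCM
 * pre-counter block used by the rfc4106 helpers is
 *
 *	iv[0..3]   = ctx->nonce   (salt taken from the key)
 *	iv[4..11]  = req->iv      (8-byte explicit IV from the packet)
 *	iv[12..15] = 0x00000001   (big-endian block counter)
 */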
904
905 static int helper_rfc4106_decrypt(struct aead_request *req)
906 {
907         __be32 counter = cpu_to_be32(1);
908         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
909         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
910         void *aes_ctx = &(ctx->aes_key_expanded);
911         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
912         unsigned int i;
913
914         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
915                 return -EINVAL;
916
917         /* Assuming we are supporting rfc4106 64-bit extended
918          * sequence numbers, we need the AAD length to be
919          * equal to 16 or 20 bytes. */
920
921         /* Build the IV: nonce | explicit IV | counter */
922         for (i = 0; i < 4; i++)
923                 *(iv+i) = ctx->nonce[i];
924         for (i = 0; i < 8; i++)
925                 *(iv+4+i) = req->iv[i];
926         *((__be32 *)(iv+12)) = counter;
927
928         return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
929                               aes_ctx);
930 }
931
932 static int gcmaes_wrapper_encrypt(struct aead_request *req)
933 {
934         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
935         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
936         struct cryptd_aead *cryptd_tfm = *ctx;
937
938         tfm = &cryptd_tfm->base;
939         if (irq_fpu_usable() && (!in_atomic() ||
940                                  !cryptd_aead_queued(cryptd_tfm)))
941                 tfm = cryptd_aead_child(cryptd_tfm);
942
943         aead_request_set_tfm(req, tfm);
944
945         return crypto_aead_encrypt(req);
946 }
947
948 static int gcmaes_wrapper_decrypt(struct aead_request *req)
949 {
950         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
951         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
952         struct cryptd_aead *cryptd_tfm = *ctx;
953
954         tfm = &cryptd_tfm->base;
955         if (irq_fpu_usable() && (!in_atomic() ||
956                                  !cryptd_aead_queued(cryptd_tfm)))
957                 tfm = cryptd_aead_child(cryptd_tfm);
958
959         aead_request_set_tfm(req, tfm);
960
961         return crypto_aead_decrypt(req);
962 }
963 #endif
964
965 static struct crypto_alg aesni_algs[] = { {
966         .cra_name               = "aes",
967         .cra_driver_name        = "aes-aesni",
968         .cra_priority           = 300,
969         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
970         .cra_blocksize          = AES_BLOCK_SIZE,
971         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
972         .cra_module             = THIS_MODULE,
973         .cra_u  = {
974                 .cipher = {
975                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
976                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
977                         .cia_setkey             = aes_set_key,
978                         .cia_encrypt            = aes_encrypt,
979                         .cia_decrypt            = aes_decrypt
980                 }
981         }
982 }, {
983         .cra_name               = "__aes",
984         .cra_driver_name        = "__aes-aesni",
985         .cra_priority           = 300,
986         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
987         .cra_blocksize          = AES_BLOCK_SIZE,
988         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
989         .cra_module             = THIS_MODULE,
990         .cra_u  = {
991                 .cipher = {
992                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
993                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
994                         .cia_setkey             = aes_set_key,
995                         .cia_encrypt            = __aes_encrypt,
996                         .cia_decrypt            = __aes_decrypt
997                 }
998         }
999 } };
1000
1001 static struct skcipher_alg aesni_skciphers[] = {
1002         {
1003                 .base = {
1004                         .cra_name               = "__ecb(aes)",
1005                         .cra_driver_name        = "__ecb-aes-aesni",
1006                         .cra_priority           = 400,
1007                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1008                         .cra_blocksize          = AES_BLOCK_SIZE,
1009                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1010                         .cra_module             = THIS_MODULE,
1011                 },
1012                 .min_keysize    = AES_MIN_KEY_SIZE,
1013                 .max_keysize    = AES_MAX_KEY_SIZE,
1014                 .setkey         = aesni_skcipher_setkey,
1015                 .encrypt        = ecb_encrypt,
1016                 .decrypt        = ecb_decrypt,
1017         }, {
1018                 .base = {
1019                         .cra_name               = "__cbc(aes)",
1020                         .cra_driver_name        = "__cbc-aes-aesni",
1021                         .cra_priority           = 400,
1022                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1023                         .cra_blocksize          = AES_BLOCK_SIZE,
1024                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1025                         .cra_module             = THIS_MODULE,
1026                 },
1027                 .min_keysize    = AES_MIN_KEY_SIZE,
1028                 .max_keysize    = AES_MAX_KEY_SIZE,
1029                 .ivsize         = AES_BLOCK_SIZE,
1030                 .setkey         = aesni_skcipher_setkey,
1031                 .encrypt        = cbc_encrypt,
1032                 .decrypt        = cbc_decrypt,
1033 #ifdef CONFIG_X86_64
1034         }, {
1035                 .base = {
1036                         .cra_name               = "__ctr(aes)",
1037                         .cra_driver_name        = "__ctr-aes-aesni",
1038                         .cra_priority           = 400,
1039                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1040                         .cra_blocksize          = 1,
1041                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1042                         .cra_module             = THIS_MODULE,
1043                 },
1044                 .min_keysize    = AES_MIN_KEY_SIZE,
1045                 .max_keysize    = AES_MAX_KEY_SIZE,
1046                 .ivsize         = AES_BLOCK_SIZE,
1047                 .chunksize      = AES_BLOCK_SIZE,
1048                 .setkey         = aesni_skcipher_setkey,
1049                 .encrypt        = ctr_crypt,
1050                 .decrypt        = ctr_crypt,
1051         }, {
1052                 .base = {
1053                         .cra_name               = "__xts(aes)",
1054                         .cra_driver_name        = "__xts-aes-aesni",
1055                         .cra_priority           = 401,
1056                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1057                         .cra_blocksize          = AES_BLOCK_SIZE,
1058                         .cra_ctxsize            = XTS_AES_CTX_SIZE,
1059                         .cra_module             = THIS_MODULE,
1060                 },
1061                 .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1062                 .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1063                 .ivsize         = AES_BLOCK_SIZE,
1064                 .setkey         = xts_aesni_setkey,
1065                 .encrypt        = xts_encrypt,
1066                 .decrypt        = xts_decrypt,
1067 #endif
1068         }
1069 };
1070
1071 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
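
/*
 * Editor's note: the skciphers above are registered with "__"-prefixed
 * names and CRYPTO_ALG_INTERNAL, so they are not directly selectable.
 * aesni_init() uses simd_skcipher_create_compat() to strip the prefix
 * (cra_name + 2) and register user-visible SIMD wrappers that defer to
 * cryptd when the FPU is not usable.
 */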
1072
1073 struct {
1074         const char *algname;
1075         const char *drvname;
1076         const char *basename;
1077         struct simd_skcipher_alg *simd;
1078 } aesni_simd_skciphers2[] = {
1079 #if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
1080     IS_BUILTIN(CONFIG_CRYPTO_PCBC)
1081         {
1082                 .algname        = "pcbc(aes)",
1083                 .drvname        = "pcbc-aes-aesni",
1084                 .basename       = "fpu(pcbc(__aes-aesni))",
1085         },
1086 #endif
1087 };
1088
1089 #ifdef CONFIG_X86_64
1090 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1091                                   unsigned int key_len)
1092 {
1093         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1094
1095         return aes_set_key_common(crypto_aead_tfm(aead),
1096                                   &ctx->aes_key_expanded, key, key_len) ?:
1097                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1098 }
1099
1100 static int generic_gcmaes_encrypt(struct aead_request *req)
1101 {
1102         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1103         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1104         void *aes_ctx = &(ctx->aes_key_expanded);
1105         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1106         __be32 counter = cpu_to_be32(1);
1107
1108         memcpy(iv, req->iv, 12);
1109         *((__be32 *)(iv+12)) = counter;
1110
1111         return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1112                               aes_ctx);
1113 }
1114
1115 static int generic_gcmaes_decrypt(struct aead_request *req)
1116 {
1117         __be32 counter = cpu_to_be32(1);
1118         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1119         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1120         void *aes_ctx = &(ctx->aes_key_expanded);
1121         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1122
1123         memcpy(iv, req->iv, 12);
1124         *((__be32 *)(iv+12)) = counter;
1125
1126         return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1127                               aes_ctx);
1128 }
1129
1130 static int generic_gcmaes_init(struct crypto_aead *aead)
1131 {
1132         struct cryptd_aead *cryptd_tfm;
1133         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
1134
1135         cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
1136                                        CRYPTO_ALG_INTERNAL,
1137                                        CRYPTO_ALG_INTERNAL);
1138         if (IS_ERR(cryptd_tfm))
1139                 return PTR_ERR(cryptd_tfm);
1140
1141         *ctx = cryptd_tfm;
1142         crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
1143
1144         return 0;
1145 }
1146
1147 static void generic_gcmaes_exit(struct crypto_aead *aead)
1148 {
1149         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
1150
1151         cryptd_free_aead(*ctx);
1152 }
1153
1154 static struct aead_alg aesni_aead_algs[] = { {
1155         .setkey                 = common_rfc4106_set_key,
1156         .setauthsize            = common_rfc4106_set_authsize,
1157         .encrypt                = helper_rfc4106_encrypt,
1158         .decrypt                = helper_rfc4106_decrypt,
1159         .ivsize                 = GCM_RFC4106_IV_SIZE,
1160         .maxauthsize            = 16,
1161         .base = {
1162                 .cra_name               = "__gcm-aes-aesni",
1163                 .cra_driver_name        = "__driver-gcm-aes-aesni",
1164                 .cra_flags              = CRYPTO_ALG_INTERNAL,
1165                 .cra_blocksize          = 1,
1166                 .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
1167                 .cra_alignmask          = AESNI_ALIGN - 1,
1168                 .cra_module             = THIS_MODULE,
1169         },
1170 }, {
1171         .init                   = rfc4106_init,
1172         .exit                   = rfc4106_exit,
1173         .setkey                 = gcmaes_wrapper_set_key,
1174         .setauthsize            = gcmaes_wrapper_set_authsize,
1175         .encrypt                = gcmaes_wrapper_encrypt,
1176         .decrypt                = gcmaes_wrapper_decrypt,
1177         .ivsize                 = GCM_RFC4106_IV_SIZE,
1178         .maxauthsize            = 16,
1179         .base = {
1180                 .cra_name               = "rfc4106(gcm(aes))",
1181                 .cra_driver_name        = "rfc4106-gcm-aesni",
1182                 .cra_priority           = 400,
1183                 .cra_flags              = CRYPTO_ALG_ASYNC,
1184                 .cra_blocksize          = 1,
1185                 .cra_ctxsize            = sizeof(struct cryptd_aead *),
1186                 .cra_module             = THIS_MODULE,
1187         },
1188 }, {
1189         .setkey                 = generic_gcmaes_set_key,
1190         .setauthsize            = generic_gcmaes_set_authsize,
1191         .encrypt                = generic_gcmaes_encrypt,
1192         .decrypt                = generic_gcmaes_decrypt,
1193         .ivsize                 = GCM_AES_IV_SIZE,
1194         .maxauthsize            = 16,
1195         .base = {
1196                 .cra_name               = "__generic-gcm-aes-aesni",
1197                 .cra_driver_name        = "__driver-generic-gcm-aes-aesni",
1198                 .cra_priority           = 0,
1199                 .cra_flags              = CRYPTO_ALG_INTERNAL,
1200                 .cra_blocksize          = 1,
1201                 .cra_ctxsize            = sizeof(struct generic_gcmaes_ctx),
1202                 .cra_alignmask          = AESNI_ALIGN - 1,
1203                 .cra_module             = THIS_MODULE,
1204         },
1205 }, {
1206         .init                   = generic_gcmaes_init,
1207         .exit                   = generic_gcmaes_exit,
1208         .setkey                 = gcmaes_wrapper_set_key,
1209         .setauthsize            = gcmaes_wrapper_set_authsize,
1210         .encrypt                = gcmaes_wrapper_encrypt,
1211         .decrypt                = gcmaes_wrapper_decrypt,
1212         .ivsize                 = GCM_AES_IV_SIZE,
1213         .maxauthsize            = 16,
1214         .base = {
1215                 .cra_name               = "gcm(aes)",
1216                 .cra_driver_name        = "generic-gcm-aesni",
1217                 .cra_priority           = 400,
1218                 .cra_flags              = CRYPTO_ALG_ASYNC,
1219                 .cra_blocksize          = 1,
1220                 .cra_ctxsize            = sizeof(struct cryptd_aead *),
1221                 .cra_module             = THIS_MODULE,
1222         },
1223 } };
1224 #else
1225 static struct aead_alg aesni_aead_algs[0];
1226 #endif
1227
1228
1229 static const struct x86_cpu_id aesni_cpu_id[] = {
1230         X86_FEATURE_MATCH(X86_FEATURE_AES),
1231         {}
1232 };
1233 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1234
1235 static void aesni_free_simds(void)
1236 {
1237         int i;
1238
1239         for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
1240                     aesni_simd_skciphers[i]; i++)
1241                 simd_skcipher_free(aesni_simd_skciphers[i]);
1242
1243         for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
1244                 if (aesni_simd_skciphers2[i].simd)
1245                         simd_skcipher_free(aesni_simd_skciphers2[i].simd);
1246 }
1247
1248 static int __init aesni_init(void)
1249 {
1250         struct simd_skcipher_alg *simd;
1251         const char *basename;
1252         const char *algname;
1253         const char *drvname;
1254         int err;
1255         int i;
1256
1257         if (!x86_match_cpu(aesni_cpu_id))
1258                 return -ENODEV;
1259 #ifdef CONFIG_X86_64
1260 #ifdef CONFIG_AS_AVX2
1261         if (boot_cpu_has(X86_FEATURE_AVX2)) {
1262                 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1263                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1264                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1265         } else
1266 #endif
1267 #ifdef CONFIG_AS_AVX
1268         if (boot_cpu_has(X86_FEATURE_AVX)) {
1269                 pr_info("AVX version of gcm_enc/dec engaged.\n");
1270                 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1271                 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1272         } else
1273 #endif
1274         {
1275                 pr_info("SSE version of gcm_enc/dec engaged.\n");
1276                 aesni_gcm_enc_tfm = aesni_gcm_enc;
1277                 aesni_gcm_dec_tfm = aesni_gcm_dec;
1278         }
1279         aesni_ctr_enc_tfm = aesni_ctr_enc;
1280 #ifdef CONFIG_AS_AVX
1281         if (boot_cpu_has(X86_FEATURE_AVX)) {
1282                 /* optimize performance of ctr mode encryption transform */
1283                 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1284                 pr_info("AES CTR mode by8 optimization enabled\n");
1285         }
1286 #endif
1287 #endif
1288
1289         err = crypto_fpu_init();
1290         if (err)
1291                 return err;
1292
1293         err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1294         if (err)
1295                 goto fpu_exit;
1296
1297         err = crypto_register_skciphers(aesni_skciphers,
1298                                         ARRAY_SIZE(aesni_skciphers));
1299         if (err)
1300                 goto unregister_algs;
1301
1302         err = crypto_register_aeads(aesni_aead_algs,
1303                                     ARRAY_SIZE(aesni_aead_algs));
1304         if (err)
1305                 goto unregister_skciphers;
1306
1307         for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
1308                 algname = aesni_skciphers[i].base.cra_name + 2;
1309                 drvname = aesni_skciphers[i].base.cra_driver_name + 2;
1310                 basename = aesni_skciphers[i].base.cra_driver_name;
1311                 simd = simd_skcipher_create_compat(algname, drvname, basename);
1312                 err = PTR_ERR(simd);
1313                 if (IS_ERR(simd))
1314                         goto unregister_simds;
1315
1316                 aesni_simd_skciphers[i] = simd;
1317         }
1318
1319         for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
1320                 algname = aesni_simd_skciphers2[i].algname;
1321                 drvname = aesni_simd_skciphers2[i].drvname;
1322                 basename = aesni_simd_skciphers2[i].basename;
1323                 simd = simd_skcipher_create_compat(algname, drvname, basename);
1324                 err = PTR_ERR(simd);
1325                 if (IS_ERR(simd))
1326                         continue;
1327
1328                 aesni_simd_skciphers2[i].simd = simd;
1329         }
1330
1331         return 0;
1332
1333 unregister_simds:
1334         aesni_free_simds();
1335         crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1336 unregister_skciphers:
1337         crypto_unregister_skciphers(aesni_skciphers,
1338                                     ARRAY_SIZE(aesni_skciphers));
1339 unregister_algs:
1340         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1341 fpu_exit:
1342         crypto_fpu_exit();
1343         return err;
1344 }
1345
1346 static void __exit aesni_exit(void)
1347 {
1348         aesni_free_simds();
1349         crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1350         crypto_unregister_skciphers(aesni_skciphers,
1351                                     ARRAY_SIZE(aesni_skciphers));
1352         crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1353
1354         crypto_fpu_exit();
1355 }
1356
1357 late_initcall(aesni_init);
1358 module_exit(aesni_exit);
1359
1360 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1361 MODULE_LICENSE("GPL");
1362 MODULE_ALIAS_CRYPTO("aes");