GNU Linux-libre 4.19.268-gnu1: arch/x86/crypto/morus1280_glue.c
/*
 * The MORUS-1280 Authenticated-Encryption Algorithm
 *   Common x86 SIMD glue skeleton
 *
 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus1280_glue.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/fpu/api.h>

struct morus1280_state {
        struct morus1280_block s[MORUS_STATE_BLOCKS];
};

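/*
 * Direction-specific operations: the AEAD walk initializer plus the
 * bulk-block and tail crypt routines, bundled so that encryption and
 * decryption can share the common driver code below.
 */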
struct morus1280_ops {
        int (*skcipher_walk_init)(struct skcipher_walk *walk,
                                  struct aead_request *req, bool atomic);

        void (*crypt_blocks)(void *state, const void *src, void *dst,
                             unsigned int length);
        void (*crypt_tail)(void *state, const void *src, void *dst,
                           unsigned int length);
};

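/*
 * Feed the associated data to the implementation in whole
 * MORUS1280_BLOCK_SIZE chunks.  Scatterlist entries need not be
 * block-aligned, so partial data is accumulated in 'buf' and flushed
 * once a full block is available; the trailing partial block, if any,
 * is zero-padded before the final ops->ad() call.
 */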
static void crypto_morus1280_glue_process_ad(
                struct morus1280_state *state,
                const struct morus1280_glue_ops *ops,
                struct scatterlist *sg_src, unsigned int assoclen)
{
        struct scatter_walk walk;
        struct morus1280_block buf;
        unsigned int pos = 0;

        scatterwalk_start(&walk, sg_src);
        while (assoclen != 0) {
                unsigned int size = scatterwalk_clamp(&walk, assoclen);
                unsigned int left = size;
                void *mapped = scatterwalk_map(&walk);
                const u8 *src = mapped;

                if (pos + size >= MORUS1280_BLOCK_SIZE) {
                        if (pos > 0) {
                                unsigned int fill = MORUS1280_BLOCK_SIZE - pos;
                                memcpy(buf.bytes + pos, src, fill);
                                ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
                                pos = 0;
                                left -= fill;
                                src += fill;
                        }

                        ops->ad(state, src, left);
                        src += left & ~(MORUS1280_BLOCK_SIZE - 1);
                        left &= MORUS1280_BLOCK_SIZE - 1;
                }

                memcpy(buf.bytes + pos, src, left);

                pos += left;
                assoclen -= size;
                scatterwalk_unmap(mapped);
                scatterwalk_advance(&walk, size);
                scatterwalk_done(&walk, 0, assoclen);
        }

        if (pos > 0) {
                memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos);
                ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
        }
}

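/*
 * Process the bulk of the message: whole blocks go through
 * ops.crypt_blocks, and a remaining tail shorter than one block is
 * finished off by ops.crypt_tail.
 */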
static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
                                                struct morus1280_ops ops,
                                                struct skcipher_walk *walk)
{
        while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
                ops.crypt_blocks(state, walk->src.virt.addr,
                                 walk->dst.virt.addr,
                                 round_down(walk->nbytes,
                                            MORUS1280_BLOCK_SIZE));
                skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
        }

        if (walk->nbytes) {
                ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
                               walk->nbytes);
                skcipher_walk_done(walk, 0);
        }
}

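/*
 * MORUS-1280 takes either a 256-bit key, used as-is, or a 128-bit key,
 * which (per the MORUS-1280-128 definition) is expanded by
 * concatenating it with itself.
 */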
int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
                                 unsigned int keylen)
{
        struct morus1280_ctx *ctx = crypto_aead_ctx(aead);

        if (keylen == MORUS1280_BLOCK_SIZE) {
                memcpy(ctx->key.bytes, key, MORUS1280_BLOCK_SIZE);
        } else if (keylen == MORUS1280_BLOCK_SIZE / 2) {
                memcpy(ctx->key.bytes, key, keylen);
                memcpy(ctx->key.bytes + keylen, key, keylen);
        } else {
                crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setkey);

int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setauthsize);

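/*
 * Common encrypt/decrypt driver.  The whole init/ad/crypt/final
 * sequence runs under a single kernel_fpu_begin()/kernel_fpu_end()
 * section, since the underlying implementations use SIMD registers.
 * The skcipher walk is therefore initialized with atomic == true:
 * sleeping is not allowed while the FPU is claimed.
 */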
static void crypto_morus1280_glue_crypt(struct aead_request *req,
                                        struct morus1280_ops ops,
                                        unsigned int cryptlen,
                                        struct morus1280_block *tag_xor)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus1280_state state;
        struct skcipher_walk walk;

        ops.skcipher_walk_init(&walk, req, true);

        kernel_fpu_begin();

        ctx->ops->init(&state, &ctx->key, req->iv);
        crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src,
                                         req->assoclen);
        crypto_morus1280_glue_process_crypt(&state, ops, &walk);
        ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);

        kernel_fpu_end();
}

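/*
 * Encrypt: run the core with an all-zero tag_xor so that 'tag' ends up
 * holding the raw authentication tag, then append the tag to the
 * ciphertext in the destination scatterlist.
 */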
int crypto_morus1280_glue_encrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus1280_ops OPS = {
                .skcipher_walk_init = skcipher_walk_aead_encrypt,
                .crypt_blocks = ctx->ops->enc,
                .crypt_tail = ctx->ops->enc_tail,
        };

        struct morus1280_block tag = {};
        unsigned int authsize = crypto_aead_authsize(tfm);
        unsigned int cryptlen = req->cryptlen;

        crypto_morus1280_glue_crypt(req, OPS, cryptlen, &tag);

        scatterwalk_map_and_copy(tag.bytes, req->dst,
                                 req->assoclen + cryptlen, authsize, 1);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_encrypt);

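/*
 * Decrypt: load the expected tag from the end of the source data and
 * pass it in as tag_xor.  The final() routine XORs the computed tag
 * into it, so on a successful authentication 'tag' is all zeroes;
 * crypto_memneq() does the comparison in constant time.
 */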
int crypto_morus1280_glue_decrypt(struct aead_request *req)
{
        static const u8 zeros[MORUS1280_BLOCK_SIZE] = {};

        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
        struct morus1280_ops OPS = {
                .skcipher_walk_init = skcipher_walk_aead_decrypt,
                .crypt_blocks = ctx->ops->dec,
                .crypt_tail = ctx->ops->dec_tail,
        };

        struct morus1280_block tag;
        unsigned int authsize = crypto_aead_authsize(tfm);
        unsigned int cryptlen = req->cryptlen - authsize;

        scatterwalk_map_and_copy(tag.bytes, req->src,
                                 req->assoclen + cryptlen, authsize, 0);

        crypto_morus1280_glue_crypt(req, OPS, cryptlen, &tag);

        return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_decrypt);

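/*
 * Install the ISA-specific implementation.  Each per-ISA module (e.g.
 * morus1280-sse2 or morus1280-avx2) supplies a morus1280_glue_ops
 * table wired up to its assembly routines and installs it from its
 * init_tfm callback, roughly as in this sketch (the *_sse2 names are
 * illustrative):
 *
 *      static const struct morus1280_glue_ops morus1280_sse2_ops = {
 *              .init = crypto_morus1280_sse2_init,
 *              .ad   = crypto_morus1280_sse2_ad,
 *              ...
 *      };
 *
 *      crypto_morus1280_glue_init_ops(aead, &morus1280_sse2_ops);
 */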
void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
                                    const struct morus1280_glue_ops *ops)
{
        struct morus1280_ctx *ctx = crypto_aead_ctx(aead);
        ctx->ops = ops;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);

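/*
 * The cryptd_* helpers below implement the outer, non-internal
 * algorithm.  They wrap the internal ("__"-prefixed) SIMD algorithm in
 * a cryptd instance so that requests issued from contexts where the
 * FPU is unavailable can be deferred to process context.
 */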
int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
                                 unsigned int keylen)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey);

int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
                                      unsigned int authsize)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize);

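/*
 * Use the internal algorithm directly when the FPU is usable and
 * ordering permits: in atomic context the request may bypass cryptd
 * only while the cryptd queue is empty, otherwise it could overtake
 * requests deferred earlier.
 */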
int cryptd_morus1280_glue_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        aead = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                aead = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, aead);

        return crypto_aead_encrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt);

int cryptd_morus1280_glue_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        struct cryptd_aead *cryptd_tfm = *ctx;

        aead = &cryptd_tfm->base;
        if (irq_fpu_usable() && (!in_atomic() ||
                                 !cryptd_aead_queued(cryptd_tfm)))
                aead = cryptd_aead_child(cryptd_tfm);

        aead_request_set_tfm(req, aead);

        return crypto_aead_decrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt);

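/*
 * Bind the outer algorithm to a cryptd instance of its internal
 * counterpart: prefixing "__" to this algorithm's driver name selects
 * the CRYPTO_ALG_INTERNAL variant, which only wrappers such as cryptd
 * are allowed to invoke.
 */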
int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead)
{
        struct cryptd_aead *cryptd_tfm;
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);
        const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
        char internal_name[CRYPTO_MAX_ALG_NAME];

        if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
                        >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
                                       CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        *ctx = cryptd_tfm;
        crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
        return 0;
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm);

void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead)
{
        struct cryptd_aead **ctx = crypto_aead_ctx(aead);

        cryptd_free_aead(*ctx);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");