GNU Linux-libre 4.14.262-gnu1
arch/x86/crypto/glue_helper.c
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
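
/*
 * This file implements the generic dispatch used by the x86 cipher glue
 * modules: a cipher registers one or more implementations of each mode
 * (e.g. an 8-way SIMD version plus a 1-block fallback), ordered from
 * widest to narrowest, and the helpers below always pick the widest
 * routine that still fits the remaining data.
 *
 * An illustrative table (a sketch modeled on the serpent-avx glue code;
 * the function names here are hypothetical):
 *
 *	static const struct common_glue_ctx my_enc_ctx = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(my_ecb_enc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(my_ecb_enc_1way) }
 *		} }
 *	};
 */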
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
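
/*
 * Example caller (an illustrative sketch modeled on the serpent/twofish
 * AVX glue modules; my_enc_ctx is the hypothetical table sketched above):
 *
 *	static int ecb_encrypt(struct blkcipher_desc *desc,
 *			       struct scatterlist *dst,
 *			       struct scatterlist *src, unsigned int nbytes)
 *	{
 *		return glue_ecb_crypt_128bit(&my_enc_ctx, desc, dst, src,
 *					     nbytes);
 *	}
 */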
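
/*
 * CBC encryption is inherently serial (each block's input depends on the
 * previous ciphertext block), so there is no multi-block batching here:
 * the caller passes a single one-block routine (typically the non-SIMD
 * version of the cipher), and no FPU section is set up.
 */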
static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
					      struct blkcipher_desc *desc,
					      struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		fn(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
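
/*
 * CBC decryption, unlike encryption, parallelizes: every ciphertext
 * block can be decrypted at once and then XORed with the preceding
 * ciphertext block.  The helper therefore walks the data backwards from
 * the last block, and saves that last ciphertext block in last_iv so it
 * can become the IV for the next chunk.
 */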
static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 last_iv;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		/* Process multi-block batch */
		if (nbytes >= func_bytes) {
			do {
				nbytes -= func_bytes - bsize;
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= bsize;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, src - 1);
				src -= 1;
				dst -= 1;
			} while (nbytes >= func_bytes);
		}
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
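
/*
 * CTR is a stream mode, so a trailing partial block is handled by
 * running the one-block CTR routine on a 16-byte stack copy and then
 * copying only the remaining nbytes back out.
 */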
static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
					struct blkcipher_desc *desc,
					struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *src = (u8 *)walk->src.virt.addr;
	u8 *dst = (u8 *)walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	le128 ctrblk;
	u128 tmp;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	memcpy(&tmp, src, nbytes);
	fn_ctr(ctx, &tmp, &tmp, &ctrblk);
	memcpy(dst, &tmp, nbytes);

	le128_to_be128((be128 *)walk->iv, &ctrblk);
}
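
/*
 * walk->iv holds the counter as a big-endian be128; the assembler
 * helpers take a little-endian le128, so the counter is converted on
 * entry and the updated value written back on exit.
 */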
static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	le128 ctrblk;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	le128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
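
/*
 * The fn_u.xts routines advance the tweak in walk->iv themselves
 * (one-block implementations typically via glue_xts_crypt_128bit_one()
 * below), so no tweak bookkeeping is needed in this loop.
 */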
static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
					    void *ctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							(le128 *)walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}
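
/*
 * Same batching loop for the newer skcipher API; it differs from
 * __glue_xts_crypt_128bit() above only in taking a struct skcipher_walk.
 */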
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}

/* for cipher implementations that provide a faster XTS tweak (IV) generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes,
			  void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
			  void *tweak_ctx, void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     desc, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
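
/* skcipher-API counterpart of glue_xts_crypt_128bit() above */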
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
					    &walk, fpu_enabled,
					    nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
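
/*
 * Process a single XTS block with the one-block cipher @fn: XOR with
 * the current tweak, en/decrypt, XOR again.  On return, *iv holds the
 * next tweak (the current one multiplied by x in GF(2^128), ble format).
 */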
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
			       common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor(dst, src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, (u8 *)dst, (u8 *)dst);

	/* P <- T xor PP */
	u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");