GNU Linux-libre 4.19.245-gnu1
drivers/crypto/virtio/virtio_crypto_algs.c
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"


struct virtio_crypto_ablkcipher_ctx {
        struct crypto_engine_ctx enginectx;
        struct virtio_crypto *vcrypto;
        struct crypto_tfm *tfm;

        struct virtio_crypto_sym_session_info enc_sess_info;
        struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
        struct virtio_crypto_request base;

        /* Cipher or aead */
        uint32_t type;
        struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
        struct ablkcipher_request *ablkcipher_req;
        uint8_t *iv;
        /* Encryption? */
        bool encrypt;
};

struct virtio_crypto_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
        struct crypto_alg algo;
};

/*
 * The algs_lock protects the per-algorithm active_devs counters
 * below and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct ablkcipher_request *req,
        int err);

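/*
 * Data-virtqueue completion callback for symmetric requests: translate
 * the device's status code into an errno and hand the ablkcipher
 * request back to the crypto engine via the finalize helper.
 */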
static void virtio_crypto_dataq_sym_callback
                (struct virtio_crypto_request *vc_req, int len)
{
        struct virtio_crypto_sym_request *vc_sym_req =
                container_of(vc_req, struct virtio_crypto_sym_request, base);
        struct ablkcipher_request *ablk_req;
        int error;

        /* Finish the encrypt or decrypt process */
        if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
                switch (vc_req->status) {
                case VIRTIO_CRYPTO_OK:
                        error = 0;
                        break;
                case VIRTIO_CRYPTO_INVSESS:
                case VIRTIO_CRYPTO_ERR:
                        error = -EINVAL;
                        break;
                case VIRTIO_CRYPTO_BADMSG:
                        error = -EBADMSG;
                        break;
                default:
                        error = -EIO;
                        break;
                }
                ablk_req = vc_sym_req->ablkcipher_req;
                virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
                                                        ablk_req, error);
        }
}

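/* Sum the byte lengths of all entries in a scatterlist. */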
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
        u64 total = 0;

        for (total = 0; sg; sg = sg_next(sg))
                total += sg->length;

        return total;
}

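/*
 * Only AES-CBC is supported here; map a valid AES key length to the
 * corresponding virtio algorithm, or reject it.
 */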
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
        switch (key_len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

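/*
 * Create one symmetric-cipher session on the device over the control
 * virtqueue.  The transaction uses three scatterlist entries: the ctrl
 * header and the key going out, and the device's status/session_id
 * coming back in vcrypto->input.  ctrl_lock serializes use of the
 * shared ctrl/input buffers.
 */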
static int virtio_crypto_alg_ablkcipher_init_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                uint32_t alg, const uint8_t *key,
                unsigned int keylen,
                int encrypt)
{
        struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
        unsigned int tmp;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
        int err;
        unsigned int num_out = 0, num_in = 0;

        /*
         * Avoid DMA from the stack: use a dynamically
         * allocated buffer for the key.
         */
        uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

        if (!cipher_key)
                return -ENOMEM;

        memcpy(cipher_key, key, keylen);

        spin_lock(&vcrypto->ctrl_lock);
        /* Fill in the ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
        vcrypto->ctrl.header.algo = cpu_to_le32(alg);
        /* Set the default dataqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
        /* Fill in the cipher parameters */
        vcrypto->ctrl.u.sym_create_session.op_type =
                cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
                vcrypto->ctrl.header.algo;
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
                cpu_to_le32(keylen);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
                cpu_to_le32(op);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Set key */
        sg_init_one(&key_sg, cipher_key, keylen);
        sgs[num_out++] = &key_sg;

        /* Return status and session id back */
        sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
        sgs[num_out + num_in++] = &inhdr;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                                num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                kzfree(cipher_key);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        /*
         * The kick traps into the hypervisor, so the request should be
         * handled immediately; spin until the response arrives.
         */
        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Create session failed status: %u\n",
                        le32_to_cpu(vcrypto->input.status));
                kzfree(cipher_key);
                return -EINVAL;
        }

        if (encrypt)
                ctx->enc_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);
        else
                ctx->dec_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);

        spin_unlock(&vcrypto->ctrl_lock);

        kzfree(cipher_key);
        return 0;
}

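/*
 * Tear down one session on the device, again via a synchronous
 * control-virtqueue transaction; only a status byte comes back.
 */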
static int virtio_crypto_alg_ablkcipher_close_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                int encrypt)
{
        struct scatterlist outhdr, status_sg, *sgs[2];
        unsigned int tmp;
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int err;
        unsigned int num_out = 0, num_in = 0;

        spin_lock(&vcrypto->ctrl_lock);
        vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
        /* Fill in the ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
        /* Set the default virtqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        destroy_session = &vcrypto->ctrl.u.destroy_session;

        if (encrypt)
                destroy_session->session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
        else
                destroy_session->session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Return status back */
        sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
                sizeof(vcrypto->ctrl_status.status));
        sgs[num_out + num_in++] = &status_sg;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                        num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                        vcrypto->ctrl_status.status,
                        destroy_session->session_id);

                return -EINVAL;
        }
        spin_unlock(&vcrypto->ctrl_lock);

        return 0;
}

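/*
 * A virtio-crypto session is unidirectional, so a usable tfm needs a
 * pair of them: validate the key, create the encrypt session, then the
 * decrypt session, unwinding the first on failure.
 */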
static int virtio_crypto_alg_ablkcipher_init_sessions(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                const uint8_t *key, unsigned int keylen)
{
        uint32_t alg;
        int ret;
        struct virtio_crypto *vcrypto = ctx->vcrypto;

        if (keylen > vcrypto->max_cipher_key_len) {
                pr_err("virtio_crypto: the key is too long\n");
                goto bad_key;
        }

        if (virtio_crypto_alg_validate_key(keylen, &alg))
                goto bad_key;

        /* Create encryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 1);
        if (ret)
                return ret;
        /* Create decryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 0);
        if (ret) {
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                return ret;
        }
        return 0;

bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

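/*
 * On the first setkey, bind the tfm to a virtio-crypto device on the
 * current NUMA node that supports the algorithm; on rekey, drop the
 * old sessions before creating new ones.
 */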
/* Note: kernel crypto API implementation */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                         const uint8_t *key,
                                         unsigned int keylen)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        uint32_t alg;
        int ret;

        ret = virtio_crypto_alg_validate_key(keylen, &alg);
        if (ret)
                return ret;

        if (!ctx->vcrypto) {
                /* New key */
                int node = virtio_crypto_get_current_node();
                struct virtio_crypto *vcrypto =
                                      virtcrypto_get_dev_node(node,
                                      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                /* Rekeying: close the previously created sessions first */
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        }

        ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
        if (ret) {
                virtcrypto_dev_put(ctx->vcrypto);
                ctx->vcrypto = NULL;

                return ret;
        }

        return 0;
}

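/*
 * Build and submit one data-virtqueue request.  The descriptor chain
 * is laid out as:
 *
 *   out: [op header][IV][source data...]
 *   in:  [destination data...][status byte]
 *
 * hence the "+ 3" when sizing the sgs array below (outhdr, iv, inhdr).
 */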
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
                struct ablkcipher_request *req,
                struct data_queue *data_vq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data;
        int src_nents, dst_nents;
        int err;
        unsigned long flags;
        struct scatterlist outhdr, iv_sg, status_sg, **sgs;
        u64 dst_len;
        unsigned int num_out = 0, num_in = 0;
        int sg_total;
        uint8_t *iv;
        struct scatterlist *sg;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                pr_err("Invalid number of src SG.\n");
                return src_nents;
        }

        dst_nents = sg_nents(req->dst);

        pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
                        src_nents, dst_nents);

        /* Why 3?  outhdr + iv + inhdr */
        sg_total = src_nents + dst_nents + 3;
        sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!sgs)
                return -ENOMEM;

        req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!req_data) {
                kfree(sgs);
                return -ENOMEM;
        }

        vc_req->req_data = req_data;
        vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        /* Head of operation */
        if (vc_sym_req->encrypt) {
                req_data->header.session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
        } else {
                req_data->header.session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
        }
        req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
        req_data->u.sym_req.u.cipher.para.src_data_len =
                        cpu_to_le32(req->nbytes);

        dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
        if (unlikely(dst_len > U32_MAX)) {
                pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
                err = -EINVAL;
                goto free;
        }

        dst_len = min_t(unsigned int, req->nbytes, dst_len);
        pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
                        req->nbytes, dst_len);

        if (unlikely(req->nbytes + dst_len + ivsize +
                sizeof(vc_req->status) > vcrypto->max_size)) {
                pr_err("virtio_crypto: The length is too big\n");
                err = -EINVAL;
                goto free;
        }

        req_data->u.sym_req.u.cipher.para.dst_data_len =
                        cpu_to_le32((uint32_t)dst_len);

        /* Outhdr */
        sg_init_one(&outhdr, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr;

        /* IV */

        /*
         * Avoid DMA from the stack: use a dynamically
         * allocated buffer for the IV.
         */
        iv = kzalloc_node(ivsize, GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!iv) {
                err = -ENOMEM;
                goto free;
        }
        memcpy(iv, req->info, ivsize);
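        /*
         * For CBC decryption, save the last ciphertext block into
         * req->info now, before an in-place operation overwrites the
         * source; the crypto API expects it there as the chaining IV
         * for a follow-on request.
         */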
        if (!vc_sym_req->encrypt)
                scatterwalk_map_and_copy(req->info, req->src,
                                         req->nbytes - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);

        sg_init_one(&iv_sg, iv, ivsize);
        sgs[num_out++] = &iv_sg;
        vc_sym_req->iv = iv;

        /* Source data */
        for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
                sgs[num_out++] = sg;

        /* Destination data */
        for (sg = req->dst; sg; sg = sg_next(sg))
                sgs[num_out + num_in++] = sg;

        /* Status */
        sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &status_sg;

        vc_req->sgs = sgs;

        spin_lock_irqsave(&data_vq->lock, flags);
        err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
                                num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (unlikely(err < 0))
                goto free_iv;

        return 0;

free_iv:
        kzfree(iv);
free:
        kzfree(req_data);
        kfree(sgs);
        return err;
}

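/*
 * ablkcipher .encrypt entry point: stash the request parameters and
 * hand the request to the crypto engine bound to the first data
 * virtqueue; the engine later calls virtio_crypto_ablkcipher_crypt_req.
 */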
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        if (!req->nbytes)
                return 0;
        if (req->nbytes % AES_BLOCK_SIZE)
                return -EINVAL;

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->ablkcipher_ctx = ctx;
        vc_sym_req->ablkcipher_req = req;
        vc_sym_req->encrypt = true;

        return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

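/* ablkcipher .decrypt entry point; identical to encrypt except for the flag. */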
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        if (!req->nbytes)
                return 0;
        if (req->nbytes % AES_BLOCK_SIZE)
                return -EINVAL;

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->ablkcipher_ctx = ctx;
        vc_sym_req->ablkcipher_req = req;
        vc_sym_req->encrypt = false;

        return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

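/*
 * tfm constructor: reserve per-request context space and wire up the
 * crypto engine callbacks (no separate prepare/unprepare steps needed).
 */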
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
        ctx->tfm = tfm;

        ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
        ctx->enginectx.op.prepare_request = NULL;
        ctx->enginectx.op.unprepare_request = NULL;
        return 0;
}

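/* tfm destructor: close both sessions and release the device reference. */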
static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        if (!ctx->vcrypto)
                return;

        virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
        virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        virtcrypto_dev_put(ctx->vcrypto);
        ctx->vcrypto = NULL;
}

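/*
 * crypto engine do_one_request callback: submit the request on its data
 * virtqueue.  Completion is reported asynchronously through
 * virtio_crypto_dataq_sym_callback.
 */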
int virtio_crypto_ablkcipher_crypt_req(
        struct crypto_engine *engine, void *vreq)
{
        struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct data_queue *data_vq = vc_req->dataq;
        int ret;

        ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
        if (ret < 0)
                return ret;

        virtqueue_kick(data_vq->vq);

        return 0;
}

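/*
 * Completion path: for CBC encryption, copy the last ciphertext block
 * back into req->info as the chaining IV (mirroring the decrypt-side
 * save in __virtio_crypto_ablkcipher_do_req), then free the IV buffer,
 * clear the request and report the result to the crypto engine.
 */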
static void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct ablkcipher_request *req,
        int err)
{
        if (vc_sym_req->encrypt)
                scatterwalk_map_and_copy(req->info, req->dst,
                                         req->nbytes - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);
        kzfree(vc_sym_req->iv);
        virtcrypto_clear_request(&vc_sym_req->base);

        crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
                                           req, err);
}

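/* The one algorithm this driver exposes: cbc(aes) as an async blkcipher. */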
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
        .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
        .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
        .algo = {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "virtio_crypto_aes_cbc",
                .cra_priority = 150,
                .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_type = &crypto_ablkcipher_type,
                .cra_init = virtio_crypto_ablkcipher_init,
                .cra_exit = virtio_crypto_ablkcipher_exit,
                .cra_u = {
                        .ablkcipher = {
                                .setkey = virtio_crypto_ablkcipher_setkey,
                                .decrypt = virtio_crypto_ablkcipher_decrypt,
                                .encrypt = virtio_crypto_ablkcipher_encrypt,
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                                .ivsize = AES_BLOCK_SIZE,
                        },
                },
        },
} };

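/*
 * Called per probed device: register each supported algorithm with the
 * crypto API on its first supporting device, and bump the active_devs
 * refcount for later ones, all under algs_lock.
 */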
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
        int ret = 0;
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 0) {
                        ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
                        if (ret)
                                goto unlock;
                }

                virtio_crypto_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
                         virtio_crypto_algs[i].algo.cra_name);
        }

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}

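/*
 * Mirror of the above: drop this device's reference on each supported
 * algorithm and unregister it when the last device goes away.
 */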
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (virtio_crypto_algs[i].active_devs == 0 ||
                    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 1)
                        crypto_unregister_alg(&virtio_crypto_algs[i].algo);

                virtio_crypto_algs[i].active_devs--;
        }

        mutex_unlock(&algs_lock);
}