// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_skcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_skcipher *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_skcipher_ctx *skcipher_ctx;
	struct skcipher_request *skcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct skcipher_alg algo;
};

/*
 * The algs_lock protects both the per-algorithm active_devs counters
 * below and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);

static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err);

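/*
 * Completion callback run from the data virtqueue's interrupt path:
 * map the device's virtio-crypto status onto a kernel errno and hand
 * the finished request back to the crypto engine.
 */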
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct skcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->skcipher_req;
		virtio_crypto_skcipher_finalize_req(vc_sym_req,
						    ablk_req, error);
	}
}

static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
			key_len);
		return -EINVAL;
	}
	return 0;
}

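/*
 * Create one cipher session on the device over the control virtqueue.
 * The request uses three descriptors: the control header and the key
 * copy are device-readable, the session_input (status + session_id) is
 * device-writable. The transient key copy is zeroized with
 * kfree_sensitive() on all paths.
 */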
static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_session_input *input;
	struct virtio_crypto_sym_create_session_req *sym_create_session;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req) {
		err = -ENOMEM;
		goto out;
	}

	/* Pad ctrl header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	ctrl->header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	ctrl->header.queue_id = 0;

	input = &vc_ctrl_req->input;
	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	sym_create_session = &ctrl->u.sym_create_session;
	sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	sym_create_session->u.cipher.para.algo = ctrl->header.algo;
	sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
	sym_create_session->u.cipher.para.op = cpu_to_le32(op);

	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, input, sizeof(*input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(input->status));
		err = -EINVAL;
		goto out;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
	else
		ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);

	err = 0;
out:
	kfree(vc_ctrl_req);
	kfree_sensitive(cipher_key);
	return err;
}

static int virtio_crypto_alg_skcipher_close_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_inhdr *ctrl_status;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req)
		return -ENOMEM;

	ctrl_status = &vc_ctrl_req->ctrl_status;
	ctrl_status->status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	ctrl->header.queue_id = 0;

	destroy_session = &ctrl->u.destroy_session;

	if (encrypt)
		destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			ctrl_status->status, destroy_session->session_id);
		err = -EINVAL;
		goto out;
	}

	err = 0;
out:
	kfree(vc_ctrl_req);
	return err;
}

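/*
 * Set up the paired encrypt/decrypt sessions for one key. If creating
 * the decrypt session fails, the already-created encrypt session is
 * torn down again.
 */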
static int virtio_crypto_alg_skcipher_init_sessions(
		struct virtio_crypto_skcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		return -EINVAL;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		return -EINVAL;

	/* Create encryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;
}

/* Note: kernel crypto API realization */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				virtcrypto_get_dev_node(node,
				VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}

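/*
 * Build one symmetric-cipher data request and add it to a data
 * virtqueue. Device-readable descriptors: op header, IV and source
 * data; device-writable descriptors: destination data and the
 * one-byte status that the completion callback inspects.
 */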
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	struct scatterlist *sg;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (src_nents < 0) {
		pr_err("Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3? outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
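	/*
	 * For decryption, stash the last ciphertext block into req->iv
	 * before the operation runs (in-place requests would overwrite
	 * it): on completion the skcipher API expects req->iv to hold
	 * the IV a follow-on CBC request would chain from.
	 */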
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
		sgs[num_out++] = sg;

	/* Destination data */
	for (sg = req->dst; sg; sg = sg_next(sg))
		sgs[num_out + num_in++] = sg;

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kfree_sensitive(iv);
free:
	kfree_sensitive(req_data);
	kfree(sgs);
	return err;
}

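/*
 * skcipher entry points. They only validate lengths and fill in the
 * per-request context; the actual submission happens on the crypto
 * engine's worker via virtio_crypto_skcipher_crypt_req().
 */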
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

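/*
 * For encrypt requests, copy the last ciphertext block back into
 * req->iv so a follow-on CBC request can chain from it, release the
 * transient IV buffer, and complete the request on the engine.
 */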
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	kfree_sensitive(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);

	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					 req, err);
}

static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "virtio_crypto_aes_cbc",
		.base.cra_priority = 150,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module = THIS_MODULE,
		.init = virtio_crypto_skcipher_init,
		.exit = virtio_crypto_skcipher_exit,
		.setkey = virtio_crypto_skcipher_setkey,
		.decrypt = virtio_crypto_skcipher_decrypt,
		.encrypt = virtio_crypto_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	},
} };

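/*
 * Registration is shared across virtio crypto devices: an algorithm is
 * registered with the crypto API when the first capable device appears
 * and unregistered when the last one goes away; active_devs does the
 * refcounting under algs_lock.
 */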
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}