2 * algif_aead: User-space interface for AEAD algorithms
4 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
6 * This file provides the user-space API for AEAD ciphers.
8 * This file is derived from algif_skcipher.c.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
16 #include <crypto/aead.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/if_alg.h>
19 #include <linux/init.h>
20 #include <linux/list.h>
21 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/net.h>
29 struct scatterlist sg[ALG_MAX_PAGES];
33 struct crypto_aead *aead;
38 struct aead_sg_list tsgl;
40 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
41 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
44 #define RSGL_MAX_ENTRIES ALG_MAX_PAGES
45 struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
49 struct af_alg_completion completion;
59 struct aead_request aead_req;
/*
 * aead_sndbuf() - bytes still available in this socket's send buffer.
 *
 * NOTE(review): the source here is elided; the tail of the return
 * expression (the value subtracted from the page-aligned sndbuf,
 * presumably ctx->used) is not visible -- confirm against the full file.
 */
62 static inline int aead_sndbuf(struct sock *sk)
64 	struct alg_sock *ask = alg_sk(sk);
65 	struct aead_ctx *ctx = ask->private;
67 	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
/* Socket is writable iff at least one full page of sndbuf space remains. */
71 static inline bool aead_writable(struct sock *sk)
73 	return PAGE_SIZE <= aead_sndbuf(sk);
/*
 * aead_sufficient_data() - true when the queued TX data covers at least
 * the associated data plus the authentication tag size, i.e. the minimum
 * input an AEAD cipher operation requires.
 */
76 static inline bool aead_sufficient_data(struct aead_ctx *ctx)
78 	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
80 	return ctx->used >= ctx->aead_assoclen + as;
/*
 * aead_put_sgl() - drop the page references held by the TX scatterlist
 * and reset it to a clean, empty state.
 *
 * NOTE(review): elided source -- loop body lines between the put_page()
 * pair and the reinit (e.g. length/used bookkeeping) are not visible.
 */
83 static void aead_put_sgl(struct sock *sk)
85 	struct alg_sock *ask = alg_sk(sk);
86 	struct aead_ctx *ctx = ask->private;
87 	struct aead_sg_list *sgl = &ctx->tsgl;
88 	struct scatterlist *sg = sgl->sg;
91 	for (i = 0; i < sgl->cur; i++) {
95 		put_page(sg_page(sg + i));
96 		sg_assign_page(sg + i, NULL);
98 	sg_init_table(sg, ALG_MAX_PAGES);
/*
 * aead_wmem_wakeup() - wake tasks waiting to write once send-buffer
 * space is available again; no-op while the socket is not writable.
 * Uses RCU to dereference the socket wait queue.
 */
105 static void aead_wmem_wakeup(struct sock *sk)
107 	struct socket_wq *wq;
109 	if (!aead_writable(sk))
113 	wq = rcu_dereference(sk->sk_wq);
114 	if (wq_has_sleeper(wq))
115 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
118 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
/*
 * aead_wait_for_data() - sleep until the sender has finished supplying
 * data (ctx->more cleared) or a signal arrives.
 *
 * Returns 0 on success; bails out early (visible default -ERESTARTSYS)
 * for MSG_DONTWAIT or a pending signal.  NOTE(review): elided source --
 * the wait loop structure and the EAGAIN path are only partially visible.
 */
122 static int aead_wait_for_data(struct sock *sk, unsigned flags)
124 	struct alg_sock *ask = alg_sk(sk);
125 	struct aead_ctx *ctx = ask->private;
128 	int err = -ERESTARTSYS;
130 	if (flags & MSG_DONTWAIT)
133 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
136 		if (signal_pending(current))
138 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
139 		timeout = MAX_SCHEDULE_TIMEOUT;
140 		if (sk_wait_event(sk, &timeout, !ctx->more)) {
145 	finish_wait(sk_sleep(sk), &wait);
147 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
/*
 * aead_data_wakeup() - notify readers that data may be ready for the
 * cipher operation (POLLOUT-side wakeup of the socket wait queue).
 * NOTE(review): the early-exit condition between the declarations and
 * the rcu_dereference() is elided from this view.
 */
152 static void aead_data_wakeup(struct sock *sk)
154 	struct alg_sock *ask = alg_sk(sk);
155 	struct aead_ctx *ctx = ask->private;
156 	struct socket_wq *wq;
164 	wq = rcu_dereference(sk->sk_wq);
165 	if (wq_has_sleeper(wq))
166 		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
169 	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
/*
 * aead_sendmsg() - queue user data into the per-socket TX scatterlist.
 *
 * Accepts an optional IV and AEAD associated-data length via control
 * messages (af_alg_cmsg_send), merges small writes into the tail page
 * when possible, and otherwise allocates fresh pages up to
 * ALG_MAX_PAGES.  MSG_MORE signals that more data follows; without it,
 * the accumulated data must already satisfy aead_sufficient_data().
 *
 * Returns the number of bytes copied or a negative errno.
 *
 * NOTE(review): source is elided; lock/unlock calls, several error
 * labels and bookkeeping lines are not visible -- verify details
 * against the complete file.
 */
173 static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
175 	struct sock *sk = sock->sk;
176 	struct alg_sock *ask = alg_sk(sk);
177 	struct aead_ctx *ctx = ask->private;
179 		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
180 	struct aead_sg_list *sgl = &ctx->tsgl;
181 	struct af_alg_control con = {};
187 	if (msg->msg_controllen) {
188 		err = af_alg_cmsg_send(msg, &con);
204 		if (con.iv && con.iv->ivlen != ivsize)
209 	if (!ctx->more && ctx->used)
215 		memcpy(ctx->iv, con.iv->iv, ivsize);
217 	ctx->aead_assoclen = con.aead_assoclen;
221 		unsigned long len = size;
222 		struct scatterlist *sg = NULL;
224 		/* use the existing memory in an allocated page */
226 			sg = sgl->sg + sgl->cur - 1;
227 			len = min_t(unsigned long, len,
228 				    PAGE_SIZE - sg->offset - sg->length);
229 			err = memcpy_from_msg(page_address(sg_page(sg)) +
230 					      sg->offset + sg->length,
236 			ctx->merge = (sg->offset + sg->length) &
245 		if (!aead_writable(sk)) {
246 			/* user space sent too much data */
252 		/* allocate a new page */
253 		len = min_t(unsigned long, size, aead_sndbuf(sk));
257 			if (sgl->cur >= ALG_MAX_PAGES) {
263 			sg = sgl->sg + sgl->cur;
264 			plen = min_t(int, len, PAGE_SIZE);
266 			sg_assign_page(sg, alloc_page(GFP_KERNEL));
271 			err = memcpy_from_msg(page_address(sg_page(sg)),
274 				__free_page(sg_page(sg));
275 				sg_assign_page(sg, NULL);
286 		ctx->merge = plen & (PAGE_SIZE - 1);
292 	ctx->more = msg->msg_flags & MSG_MORE;
293 	if (!ctx->more && !aead_sufficient_data(ctx)) {
299 	aead_data_wakeup(sk);
302 	return err ?: copied;
/*
 * aead_sendpage() - zero-copy variant of aead_sendmsg(): attach a
 * caller-supplied page directly to the TX scatterlist instead of
 * copying the data.  Rejects the transfer when the scatterlist is full
 * (ALG_MAX_PAGES) or the socket is not writable.  MSG_MORE semantics
 * match aead_sendmsg().
 *
 * NOTE(review): elided source -- the get_page() reference take and the
 * used/cur bookkeeping between the visible lines are not shown.
 */
305 static ssize_t aead_sendpage(struct socket *sock, struct page *page,
306 			     int offset, size_t size, int flags)
308 	struct sock *sk = sock->sk;
309 	struct alg_sock *ask = alg_sk(sk);
310 	struct aead_ctx *ctx = ask->private;
311 	struct aead_sg_list *sgl = &ctx->tsgl;
314 	if (flags & MSG_SENDPAGE_NOTLAST)
317 	if (sgl->cur >= ALG_MAX_PAGES)
321 	if (!ctx->more && ctx->used)
327 	if (!aead_writable(sk)) {
328 		/* user space sent too much data */
337 	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
344 	ctx->more = flags & MSG_MORE;
345 	if (!ctx->more && !aead_sufficient_data(ctx)) {
351 	aead_data_wakeup(sk);
/*
 * aead_recvmsg() - perform the AEAD cipher operation and copy the
 * result to user space.
 *
 * Waits for the sender to finish (aead_wait_for_data), re-validates
 * that enough TX data is queued, converts the user's output iovecs
 * into scatterlists (bounded by RSGL_MAX_ENTRIES), runs the encrypt or
 * decrypt synchronously via af_alg_wait_for_completion(), then frees
 * the RX scatterlists and wakes any blocked writers.
 *
 * NOTE(review): elided source -- locking, the computation of 'used'
 * and 'outlen', and the error labels are only partially visible.
 */
357 static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
359 	struct sock *sk = sock->sk;
360 	struct alg_sock *ask = alg_sk(sk);
361 	struct aead_ctx *ctx = ask->private;
362 	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
363 	struct aead_sg_list *sgl = &ctx->tsgl;
366 	unsigned long used = 0;
368 	size_t usedpages = 0;
369 	unsigned int cnt = 0;
371 	/* Limit number of IOV blocks to be accessed below */
372 	if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
378 	 * AEAD memory structure: For encryption, the tag is appended to the
379 	 * ciphertext which implies that the memory allocated for the ciphertext
380 	 * must be increased by the tag length. For decryption, the tag
381 	 * is expected to be concatenated to the ciphertext. The plaintext
382 	 * therefore has a memory size of the ciphertext minus the tag length.
384 	 * The memory structure for cipher operation has the following
386 	 * AEAD encryption input:  assoc data || plaintext
387 	 * AEAD encryption output: ciphertext || auth tag
388 	 * AEAD decryption input:  assoc data || ciphertext || auth tag
389 	 * AEAD decryption output: plaintext
393 		err = aead_wait_for_data(sk, flags);
401 	 * Make sure sufficient data is present -- note, the same check is
402 	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
403 	 * shall provide an information to the data sender that something is
404 	 * wrong, but they are irrelevant to maintain the kernel integrity.
405 	 * We need this check here too in case user space decides to not honor
406 	 * the error message in sendmsg/sendpage and still call recvmsg. This
407 	 * check here protects the kernel integrity.
409 	if (!aead_sufficient_data(ctx))
415 	 * The cipher operation input data is reduced by the associated data
416 	 * length as this data is processed separately later on.
418 	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
420 	/* convert iovecs of output buffers into scatterlists */
421 	while (iov_iter_count(&msg->msg_iter)) {
422 		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
423 				      (outlen - usedpages));
425 		/* make one iovec available as scatterlist */
426 		err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
431 		/* chain the new scatterlist with previous one */
433 			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
435 		/* we do not need more iovecs as we have sufficient memory */
436 		if (outlen <= usedpages)
438 		iov_iter_advance(&msg->msg_iter, err);
443 	/* ensure output buffer is sufficiently large */
444 	if (usedpages < outlen)
447 	sg_mark_end(sgl->sg + sgl->cur - 1);
449 	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
451 	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
453 	err = af_alg_wait_for_completion(ctx->enc ?
454 					 crypto_aead_encrypt(&ctx->aead_req) :
455 					 crypto_aead_decrypt(&ctx->aead_req),
459 		/* EBADMSG implies a valid cipher operation took place */
470 	for (i = 0; i < cnt; i++)
471 		af_alg_free_sg(&ctx->rsgl[i]);
473 	aead_wmem_wakeup(sk);
476 	return err ? err : outlen;
/*
 * aead_poll() - report readiness: POLLIN when a complete request is
 * queued (condition elided from this view), POLLOUT when send-buffer
 * space is available.
 */
479 static unsigned int aead_poll(struct file *file, struct socket *sock,
482 	struct sock *sk = sock->sk;
483 	struct alg_sock *ask = alg_sk(sk);
484 	struct aead_ctx *ctx = ask->private;
487 	sock_poll_wait(file, sk_sleep(sk), wait);
491 		mask |= POLLIN | POLLRDNORM;
493 	if (aead_writable(sk))
494 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
/*
 * proto_ops for an AEAD request socket with a key set: all unsupported
 * socket operations are stubbed with the sock_no_* helpers, while data
 * transfer is routed to the aead_* implementations above.
 */
499 static struct proto_ops algif_aead_ops = {
502 	.connect	=	sock_no_connect,
503 	.socketpair	=	sock_no_socketpair,
504 	.getname	=	sock_no_getname,
505 	.ioctl		=	sock_no_ioctl,
506 	.listen		=	sock_no_listen,
507 	.shutdown	=	sock_no_shutdown,
508 	.getsockopt	=	sock_no_getsockopt,
509 	.mmap		=	sock_no_mmap,
510 	.bind		=	sock_no_bind,
511 	.accept		=	sock_no_accept,
512 	.setsockopt	=	sock_no_setsockopt,
514 	.release	=	af_alg_release,
515 	.sendmsg	=	aead_sendmsg,
516 	.sendpage	=	aead_sendpage,
517 	.recvmsg	=	aead_recvmsg,
/*
 * aead_check_key() - transition a "nokey" socket to the keyed ops once
 * the parent tfm has a key set; drops the parent's nokey refcount.
 * Fast path: nothing to do when this socket holds no nokey reference.
 * NOTE(review): elided source -- the key-presence check on the tfm and
 * the lock/unlock pairing are not visible here.
 */
521 static int aead_check_key(struct socket *sock)
525 	struct alg_sock *pask;
526 	struct aead_tfm *tfm;
527 	struct sock *sk = sock->sk;
528 	struct alg_sock *ask = alg_sk(sk);
531 	if (!atomic_read(&ask->nokey_refcnt))
535 	pask = alg_sk(ask->parent);
539 	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
543 	atomic_dec(&pask->nokey_refcnt);
544 	atomic_set(&ask->nokey_refcnt, 0);
/* Nokey wrapper: verify a key is set before delegating to aead_sendmsg(). */
556 static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
561 	err = aead_check_key(sock);
565 	return aead_sendmsg(sock, msg, size);
/* Nokey wrapper: verify a key is set before delegating to aead_sendpage(). */
568 static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
569 				       int offset, size_t size, int flags)
573 	err = aead_check_key(sock);
577 	return aead_sendpage(sock, page, offset, size, flags);
/* Nokey wrapper: verify a key is set before delegating to aead_recvmsg(). */
580 static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
581 				  size_t ignored, int flags)
585 	err = aead_check_key(sock);
589 	return aead_recvmsg(sock, msg, ignored, flags);
/*
 * proto_ops installed while no key has been set: data-transfer entry
 * points go through the *_nokey wrappers, which refuse to operate
 * until aead_check_key() succeeds.
 */
592 static struct proto_ops algif_aead_ops_nokey = {
595 	.connect	=	sock_no_connect,
596 	.socketpair	=	sock_no_socketpair,
597 	.getname	=	sock_no_getname,
598 	.ioctl		=	sock_no_ioctl,
599 	.listen		=	sock_no_listen,
600 	.shutdown	=	sock_no_shutdown,
601 	.getsockopt	=	sock_no_getsockopt,
602 	.mmap		=	sock_no_mmap,
603 	.bind		=	sock_no_bind,
604 	.accept		=	sock_no_accept,
605 	.setsockopt	=	sock_no_setsockopt,
607 	.release	=	af_alg_release,
608 	.sendmsg	=	aead_sendmsg_nokey,
609 	.sendpage	=	aead_sendpage_nokey,
610 	.recvmsg	=	aead_recvmsg_nokey,
/*
 * aead_bind() - af_alg_type .bind callback: allocate the aead_tfm
 * wrapper and the underlying crypto_aead transform for 'name'.
 * Returns the wrapper or an ERR_PTR.  NOTE(review): the error-path
 * kfree(tfm) before ERR_CAST and the final assignment/return are
 * elided from this view.
 */
614 static void *aead_bind(const char *name, u32 type, u32 mask)
616 	struct aead_tfm *tfm;
617 	struct crypto_aead *aead;
619 	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
621 		return ERR_PTR(-ENOMEM);
623 	aead = crypto_alloc_aead(name, type, mask);
626 		return ERR_CAST(aead);
/* .release callback: free the transform held by the aead_tfm wrapper. */
634 static void aead_release(void *private)
636 	struct aead_tfm *tfm = private;
638 	crypto_free_aead(tfm->aead);
/* .setauthsize callback: forward the tag-size request to the transform. */
642 static int aead_setauthsize(void *private, unsigned int authsize)
644 	struct aead_tfm *tfm = private;
646 	return crypto_aead_setauthsize(tfm->aead, authsize);
/* .setkey callback: set the cipher key on the underlying transform. */
649 static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
651 	struct aead_tfm *tfm = private;
654 	err = crypto_aead_setkey(tfm->aead, key, keylen);
/*
 * aead_sock_destruct() - socket destructor: zero-free the IV, free the
 * per-socket context, and drop the reference on the parent tfm socket.
 */
660 static void aead_sock_destruct(struct sock *sk)
662 	struct alg_sock *ask = alg_sk(sk);
663 	struct aead_ctx *ctx = ask->private;
664 	unsigned int ivlen = crypto_aead_ivsize(
665 				crypto_aead_reqtfm(&ctx->aead_req));
668 	sock_kzfree_s(sk, ctx->iv, ivlen);
669 	sock_kfree_s(sk, ctx, ctx->len);
670 	af_alg_release_parent(sk);
/*
 * aead_accept_parent_nokey() - set up the per-request-socket context:
 * allocate ctx (sized for the aead request) plus a zeroed IV, init the
 * TX scatterlist and completion, bind the aead request to the tfm, and
 * install the destructor.  NOTE(review): elided source -- the -ENOMEM
 * returns and the remaining ctx field initialisation (len/used/more/
 * merge/enc) are not visible here.
 */
673 static int aead_accept_parent_nokey(void *private, struct sock *sk)
675 	struct aead_ctx *ctx;
676 	struct alg_sock *ask = alg_sk(sk);
677 	struct aead_tfm *tfm = private;
678 	struct crypto_aead *aead = tfm->aead;
679 	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
680 	unsigned int ivlen = crypto_aead_ivsize(aead);
682 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
687 	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
689 		sock_kfree_s(sk, ctx, len);
692 	memset(ctx->iv, 0, ivlen);
700 	ctx->aead_assoclen = 0;
701 	af_alg_init_completion(&ctx->completion);
702 	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
706 	aead_request_set_tfm(&ctx->aead_req, aead);
707 	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
708 				  af_alg_complete, &ctx->completion);
710 	sk->sk_destruct = aead_sock_destruct;
/*
 * aead_accept_parent() - keyed .accept callback; delegates to the nokey
 * variant.  NOTE(review): the key-presence guard between the tfm lookup
 * and the delegation is elided from this view.
 */
715 static int aead_accept_parent(void *private, struct sock *sk)
717 	struct aead_tfm *tfm = private;
722 	return aead_accept_parent_nokey(private, sk);
/* AF_ALG type descriptor wiring the aead callbacks and proto_ops tables. */
725 static const struct af_alg_type algif_type_aead = {
727 	.release	=	aead_release,
728 	.setkey		=	aead_setkey,
729 	.setauthsize	=	aead_setauthsize,
730 	.accept		=	aead_accept_parent,
731 	.accept_nokey	=	aead_accept_parent_nokey,
732 	.ops		=	&algif_aead_ops,
733 	.ops_nokey	=	&algif_aead_ops_nokey,
/* Module init: register the "aead" type with the AF_ALG core. */
738 static int __init algif_aead_init(void)
740 	return af_alg_register_type(&algif_type_aead);
/* Module exit: unregister the "aead" type from the AF_ALG core. */
743 static void __exit algif_aead_exit(void)
745 	int err = af_alg_unregister_type(&algif_type_aead);
/* Module plumbing: entry/exit hooks and metadata. */
749 module_init(algif_aead_init);
750 module_exit(algif_aead_exit);
751 MODULE_LICENSE("GPL");
752 MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
753 MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");