/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#include "tls.h"
struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	bool async_done;
	u8 tail;
	);

	struct sk_buff *skb;
};
struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	bool free_sgout;
	struct scatterlist sg[];
};
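
/* Error reporting below follows the kernel's negative-errno convention:
 * callers hand tls_err_abort() a negative error (e.g. -EBADMSG) and it
 * stores the positive counterpart in sk->sk_err for userspace, e.g.:
 *
 *	tls_err_abort(sk, -EBADMSG);	-> sk->sk_err == EBADMSG
 */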
noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}
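
/* TLS 1.3 hides the real record type: the AEAD plaintext carries
 * TLSInnerPlaintext = content || content-type byte || zero padding
 * (RFC 8446, Section 5.2).  tls_padding_length() below walks backwards
 * from the end of the decrypted record, skipping zeros, to recover both
 * the padding length and the true content type, e.g.:
 *
 *	"abc" 0x17 0x00 0x00  ->  3 data bytes, type 0x17, 2 pad bytes
 */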
static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int sub = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			sub++;
			offset--;
		}
		tlm->control = content_type;
	}

	return sub;
}
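
/* Async decrypt completion path: tls_do_decryption() hands the crypto
 * layer a request whose callback is tls_decrypt_done() below.  The state
 * the callback needs (socket, IV, AAD, sg entries) lives in the
 * tls_decrypt_ctx that is co-allocated directly behind the aead_request,
 * which is how the callback recovers it by pointer arithmetic rather
 * than via a separate allocation.
 */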
static void tls_decrypt_done(crypto_completion_data_t *data, int err)
{
	struct aead_request *aead_req = crypto_get_completion_data(data);
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	/* If requests get too backlogged crypto API returns -EBUSY and calls
	 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
	 * to make waiting for backlog to flush with crypto_wait_req() easier.
	 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
	 * -EINPROGRESS -> 0.
	 * We have a single struct crypto_async_request per direction, this
	 * scheme doesn't help us, so just ignore the first ->complete().
	 */
	if (err == -EINPROGRESS)
		return;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if skb was not decrypted in place */
	if (dctx->free_sgout) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	kfree(aead_req);

	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
}
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);

	return ctx->async_wait.err;
}
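
/* decrypt_pending (and its tx twin encrypt_pending) is biased by 1: it is
 * initialized to 1 in init_ctx_rx(), each in-flight request adds one, and
 * each completion subtracts one.  Only the waiter above drops the bias;
 * if the counter then hits zero all requests have completed, otherwise it
 * sleeps on async_wait and restores the bias afterwards.
 */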
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &ctx->async_wait);
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS) {
		if (darg->async)
			return 0;

		ret = crypto_wait_req(ret, &ctx->async_wait);
	} else if (ret == -EBUSY) {
		ret = tls_decrypt_async_wait(ctx);
		darg->async_done = true;
		/* all completions have run, we're not doing async anymore */
		darg->async = false;
		ret = ret ?: -EINPROGRESS;
		return ret;
	} else if (darg->async) {
		atomic_dec(&ctx->decrypt_pending);
	}
	darg->async = false;

	return ret;
}
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}
static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
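
/* The open record thus keeps two views of the same payload: msg_plaintext
 * borrows page references from msg_encrypted at matching offsets, so the
 * AEAD can later read the plaintext scatterlist and write the ciphertext
 * over the very same pages without an extra copy of the queued data.
 */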
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	rec->sk = sk;

	return rec;
}
static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}
static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}
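
/* Transmission side: encrypted records sit on ctx->tx_list in submission
 * order.  tls_tx_records() below walks the list head-first and stops at
 * the first record whose async encryption has not finished (!tx_ready),
 * which preserves TLS's strict record ordering on the wire.
 */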
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, -EBADMSG);

	return rc;
}
static void tls_encrypt_done(crypto_completion_data_t *data, int err)
{
	struct aead_request *aead_req = crypto_get_completion_data(data);
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	struct sock *sk;

	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
		return;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec) {
			/* Schedule the transmission */
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
}
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->encrypt_pending);

	return ctx->async_wait.err;
}
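
/* Nonce handling below: rec->iv_data gets the cipher's salt + IV, then
 * tls_xor_iv_with_seq() folds in the record sequence number.  For
 * TLS 1.3 and ChaCha20-Poly1305 that is the per-record nonce of RFC 8446
 * Section 5.3 (static IV XOR sequence number); TLS 1.2 GCM/CCM instead
 * send an explicit IV on the wire.  CCM additionally needs its constant
 * B0 flags byte in front, hence iv_offset = 1.
 */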
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, aead_req);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (rc == -EBUSY) {
		rc = tls_encrypt_async_wait(ctx);
		rc = rc ?: -EINPROGRESS;
	}
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption did not fail */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}
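
/* tls_split_open_record() and tls_merge_open_record() above exist for the
 * BPF apply_bytes contract: when a verdict program applies to only part
 * of the queued data, the open record is split at apply_bytes so the
 * covered part can be pushed as its own TLS record, and merged back if
 * the push fails or the split turns out to be unnecessary.
 */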
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message. No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	msg->cork_bytes = 0;
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	size_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}
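
/* tls_sw_sendmsg() below builds at most TLS_MAX_PAYLOAD_SIZE (16 kB) of
 * plaintext per record.  On each loop iteration it either maps user pages
 * directly into the plaintext scatterlist (the zerocopy path, used only
 * with synchronous crypto) or copies into the record's own pages, then
 * closes the record once it is full or the message ends without MSG_MORE.
 */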
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}
	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}
		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		int err;

		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return copied > 0 ? copied : ret;
}
/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;

	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* same checks as in tls_sw_push_pending_record() */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;
	if (msg_pl->sg.size == 0)
		goto unlock;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			goto unlock;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
	default:
		break;
	}

	/* Wait for pending encryptions to get completed */
	if (tls_encrypt_async_wait(ctx))
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
			      int offset, size_t size, int flags)
{
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct sk_msg *msg_pl;
	struct tls_rec *rec;
	int num_async = 0;
	ssize_t copied = 0;
	bool full_record;
	int record_room;
	int ret = 0;
	bool eor;

	eor = !(flags & MSG_SENDPAGE_NOTLAST);
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto sendpage_end;
		}

		msg_pl = &rec->msg_plaintext;

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + copy + prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - msg_pl->sg.size;
			full_record = true;
		}

		sk_msg_page_add(msg_pl, page, copy, offset);
		msg_pl->sg.copybreak = 0;
		msg_pl->sg.curr = msg_pl->sg.end;
		sk_mem_charge(sk, copy);

		offset += copy;
		size -= copy;
		copied += copy;

		tls_ctx->pending_open_record_frags = true;
		if (full_record || eor || sk_msg_full(msg_pl)) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied, flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto sendpage_end;
				}
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			tls_trim_both_msgs(sk, msg_pl->sg.size);
			goto sendpage_end;
		}

		if (ctx->open_rec)
			goto alloc_payload;
	}

	if (num_async) {
		/* Transmit if any encryptions have completed */
		if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
			cancel_delayed_work(&ctx->tx_work.work);
			tls_tx_records(sk, flags);
		}
	}
sendpage_end:
	ret = sk_stream_error(sk, flags, ret);
	return copied > 0 ? copied : ret;
}
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags)
{
	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
		      MSG_NO_SHARED_FRAGS))
		return -EOPNOTSUPP;

	return tls_sw_do_sendpage(sk, page, offset, size, flags);
}
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}
static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}
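
/* For example, a 10000-byte, page-aligned out_iov is mapped above as one
 * sg entry per page touched: 4096 + 4096 + 1808 bytes, i.e. three
 * entries, since each inner iteration caps 'use' at PAGE_SIZE - offset.
 */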
static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}
/*
 * Decrypt handlers
 *
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       |          Input            |         Output
 * -------------------------------------------------------------------
 *    zc | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 *   skb |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */
/* This function decrypts the input skb into either out_iov or in out_sg
 * or in skb buffers itself. The input parameter 'darg->zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}
	dctx->free_sgout = !!pages;

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err) {
		if (darg->async_done)
			goto exit_free_skb;
		goto exit_free_pages;
	}

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (unlikely(darg->async_done))
		return 0;

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}
static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}
static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}
int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
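
/* Non-data record types surface to userspace via a cmsg, per the kernel
 * TLS API in Documentation/networking/tls.rst.  A hypothetical receiver
 * (handle_record() is an assumed application helper) checks it roughly
 * like:
 *
 *	char buf[16384], cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	ssize_t n = recvmsg(fd, &m, 0);
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *	if (c && c->cmsg_level == SOL_TLS &&
 *	    c->cmsg_type == TLS_GET_RECORD_TYPE)
 *		handle_record(*CMSG_DATA(c), buf, n);
 */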
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}
static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}
/* This function traverses the rx_list in the TLS receive context to copy
 * the decrypted records into the buffer provided by the caller when zero
 * copy is not true. Further, the records are removed from the rx_list if
 * it is not a peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek,
			   bool *more)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto more;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from record if it is a non-peek case */
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	return copied ? : err;
more:
	if (more)
		*more = true;
	goto out;
}
static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;

	if (len_left <= decrypted)
		return false;

	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;

	*flushed_at = done;
	return sk_flush_backlog(sk);
}
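
/* The Rx path is serialized by a hand-rolled "reader lock" below rather
 * than by holding the socket lock across the whole read: the reader
 * marks itself present, then may release the socket lock while waiting
 * for data or crypto so that softirq processing can keep feeding the
 * strparser in the meantime.
 */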
static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;
	int ret;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		ctx->reader_contended = 1;

		add_wait_queue(&ctx->wq, &wait);
		ret = sk_wait_event(sk, &timeo,
				    !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);

		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		if (ret < 0)
			return ret;
	}

	WRITE_ONCE(ctx->reader_present, 1);

	return 0;
}
static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
			      bool nonblock)
{
	int err;

	lock_sock(sk);
	err = tls_rx_reader_acquire(sk, ctx, nonblock);
	if (err)
		release_sock(sk);
	return err;
}
static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			ctx->reader_contended = 0;

		WARN_ON_ONCE(!ctx->reader_present);
	}

	WRITE_ONCE(ctx->reader_present, 0);
}
static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	ssize_t decrypted = 0, async_copy_bytes = 0;
	struct sk_psock *psock;
	unsigned char control = 0;
	size_t flushed_at = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t peeked = 0;
	bool async = false;
	int target, err;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool rx_more = false;
	bool released = true;
	bool bpf_strp_enabled;
	bool zc_capable;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;
	psock = sk_psock_get(sk);
	bpf_strp_enabled = sk_psock_strp_enabled(psock);

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto end;

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
	if (err < 0)
		goto end;

	copied = err;
	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
		goto end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;

	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		     ctx->zc_capable;
	decrypted = 0;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;

		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;

		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;

		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}

		async |= darg.async;

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */
		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
		if (err <= 0) {
			DEBUG_NET_WARN_ON_ONCE(darg.zc);
			tls_rx_rec_done(ctx);
put_on_rx_list_err:
			__skb_queue_tail(&ctx->rx_list, darg.skb);
			goto recv_end;
		}

		/* periodically flush backlog, and feed strparser */
		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
						  decrypted + copied,
						  &flushed_at);

		/* TLS 1.3 may have updated the length by more than overhead */
		rxm = strp_msg(darg.skb);
		chunk = rxm->full_len;
		tls_rx_rec_done(ctx);

		if (!darg.zc) {
			bool partially_consumed = chunk > len;
			struct sk_buff *skb = darg.skb;

			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);

			if (async) {
				/* TLS 1.2-only, to_decrypt must be text len */
				chunk = min_t(int, to_decrypt, len);
				async_copy_bytes += chunk;
put_on_rx_list:
				decrypted += chunk;
				len -= chunk;
				__skb_queue_tail(&ctx->rx_list, skb);
				if (unlikely(control != TLS_RECORD_TYPE_DATA))
					break;
				continue;
			}

			if (bpf_strp_enabled) {
				released = true;
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}

			if (partially_consumed)
				chunk = len;

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto put_on_rx_list_err;

			if (is_peek) {
				peeked += chunk;
				goto put_on_rx_list;
			}

			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}

			consume_skb(skb);
		}

		decrypted += chunk;
		len -= chunk;

		/* Return full control message to userspace before trying
		 * to parse another message type
		 */
		msg->msg_flags |= MSG_EOR;
		if (control != TLS_RECORD_TYPE_DATA)
			break;
	}

recv_end:
	if (async) {
		int ret;

		/* Wait for all previously submitted records to be decrypted */
		ret = tls_decrypt_async_wait(ctx);
		__skb_queue_purge(&ctx->async_hold);

		if (ret) {
			if (err >= 0 || err == -EINPROGRESS)
				err = ret;
			decrypted = 0;
			goto end;
		}

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
			err = process_rx_list(ctx, msg, &control, copied + peeked,
					      decrypted - peeked, is_peek, NULL);
		else
			err = process_rx_list(ctx, msg, &control, 0,
					      async_copy_bytes, is_peek, NULL);

		/* we could have copied less than we wanted, and possibly nothing */
		decrypted += max(err, 0) - async_copy_bytes;
	}

	copied += decrypted;

end:
	tls_rx_reader_unlock(sk, ctx);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;

	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;

	if (!skb_queue_empty(&ctx->rx_list)) {
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;

		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}

		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}

	rxm = strp_msg(skb);
	tlm = tls_msg(skb);

	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;

	if (chunk < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}

	consume_skb(skb);

splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;

splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}
bool tls_sw_sock_is_readable(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || tls_strp_msg_ready(ctx) ||
		!skb_queue_empty(&ctx->rx_list);
}
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	strp->mark = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
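
/* tls_rx_msg_size() above parses the 5-byte TLS record header:
 *
 *	header[0]      ContentType
 *	header[1..2]   legacy version (0x03 0x03 for both TLS 1.2 and 1.3)
 *	header[3..4]   ciphertext length, big endian
 *
 * so data_len = (header[3] << 8) | header[4], and the full record on the
 * wire is data_len + TLS_HEADER_SIZE bytes.
 */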
void tls_rx_msg_ready(struct tls_strparser *strp)
{
	struct tls_sw_context_rx *ctx;

	ctx = container_of(strp, struct tls_sw_context_rx, strp);
	ctx->saved_data_ready(strp->sk);
}
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	gfp_t alloc_save;

	alloc_save = sk->sk_allocation;
	sk->sk_allocation = GFP_ATOMIC;
	tls_strp_data_ready(&ctx->strp);
	sk->sk_allocation = alloc_save;

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	tls_encrypt_async_wait(ctx);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}
void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_strp_done(&ctx->strp);
}
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}
static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (tls_is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}
void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
			     tls_ctx->prot_info.version != TLS_1_3_VERSION;
}
static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
{
	struct tls_sw_context_tx *sw_ctx_tx;

	if (!ctx->priv_ctx_tx) {
		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
		if (!sw_ctx_tx)
			return NULL;
	} else {
		sw_ctx_tx = ctx->priv_ctx_tx;
	}

	crypto_init_wait(&sw_ctx_tx->async_wait);
	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
	sw_ctx_tx->tx_work.sk = sk;

	return sw_ctx_tx;
}

static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
{
	struct tls_sw_context_rx *sw_ctx_rx;

	if (!ctx->priv_ctx_rx) {
		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
		if (!sw_ctx_rx)
			return NULL;
	} else {
		sw_ctx_rx = ctx->priv_ctx_rx;
	}

	crypto_init_wait(&sw_ctx_rx->async_wait);
	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
	init_waitqueue_head(&sw_ctx_rx->wq);
	skb_queue_head_init(&sw_ctx_rx->rx_list);
	skb_queue_head_init(&sw_ctx_rx->async_hold);

	return sw_ctx_rx;
}
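
/* Illustrative sketch, not part of the original file: encrypt_pending and
 * decrypt_pending are seeded to 1 above so the context itself holds a
 * "bias" reference on the counter; a waiter drops that bias, waits until
 * in-flight async crypto completions drain the count, then re-arms it.
 * A generic model of the pattern (the demo_* name is hypothetical):
 */
#if 0
static int demo_wait_for_pending(atomic_t *pending, struct crypto_wait *wait)
{
	int ret = 0;

	/* Drop the init-time bias; if requests are still in flight the
	 * final completion signals the crypto_wait.
	 */
	if (!atomic_dec_and_test(pending))
		ret = crypto_wait_req(-EINPROGRESS, wait);
	atomic_inc(pending);	/* re-arm the bias for the next batch */
	return ret;
}
#endif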

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_crypto_info *crypto_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct crypto_tfm *tfm;
	char *iv, *rec_seq, *key, *salt, *cipher_name;
	size_t keysize;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}
	if (tx) {
		ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
		if (!ctx->priv_ctx_tx)
			return -ENOMEM;

		sw_ctx_tx = ctx->priv_ctx_tx;
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		ctx->priv_ctx_rx = init_ctx_rx(ctx);
		if (!ctx->priv_ctx_rx)
			return -ENOMEM;

		sw_ctx_rx = ctx->priv_ctx_rx;
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;

		gcm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = gcm_128_info->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq = gcm_128_info->rec_seq;
		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		key = gcm_128_info->key;
		salt = gcm_128_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;

		gcm_256_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
		iv = gcm_256_info->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
		rec_seq = gcm_256_info->rec_seq;
		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aes)";
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;

		ccm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
		iv = ccm_128_info->iv;
		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
		rec_seq = ccm_128_info->rec_seq;
		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
		key = ccm_128_info->key;
		salt = ccm_128_info->salt;
		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
		cipher_name = "ccm(aes)";
		break;
	}
	case TLS_CIPHER_CHACHA20_POLY1305: {
		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;

		chacha20_poly1305_info = (void *)crypto_info;
		nonce_size = 0;
		tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
		iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
		iv = chacha20_poly1305_info->iv;
		rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
		rec_seq = chacha20_poly1305_info->rec_seq;
		keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
		key = chacha20_poly1305_info->key;
		salt = chacha20_poly1305_info->salt;
		salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
		cipher_name = "rfc7539(chacha20,poly1305)";
		break;
	}
	case TLS_CIPHER_SM4_GCM: {
		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;

		sm4_gcm_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
		iv = sm4_gcm_info->iv;
		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
		rec_seq = sm4_gcm_info->rec_seq;
		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
		key = sm4_gcm_info->key;
		salt = sm4_gcm_info->salt;
		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
		cipher_name = "gcm(sm4)";
		break;
	}
	case TLS_CIPHER_SM4_CCM: {
		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;

		sm4_ccm_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
		iv = sm4_ccm_info->iv;
		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
		rec_seq = sm4_ccm_info->rec_seq;
		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
		key = sm4_ccm_info->key;
		salt = sm4_ccm_info->salt;
		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
		cipher_name = "ccm(sm4)";
		break;
	}
	case TLS_CIPHER_ARIA_GCM_128: {
		struct tls12_crypto_info_aria_gcm_128 *aria_gcm_128_info;

		aria_gcm_128_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_ARIA_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
		iv = aria_gcm_128_info->iv;
		rec_seq_size = TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE;
		rec_seq = aria_gcm_128_info->rec_seq;
		keysize = TLS_CIPHER_ARIA_GCM_128_KEY_SIZE;
		key = aria_gcm_128_info->key;
		salt = aria_gcm_128_info->salt;
		salt_size = TLS_CIPHER_ARIA_GCM_128_SALT_SIZE;
		cipher_name = "gcm(aria)";
		break;
	}
	case TLS_CIPHER_ARIA_GCM_256: {
		struct tls12_crypto_info_aria_gcm_256 *gcm_256_info;

		gcm_256_info = (void *)crypto_info;
		nonce_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
		tag_size = TLS_CIPHER_ARIA_GCM_256_TAG_SIZE;
		iv_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
		iv = gcm_256_info->iv;
		rec_seq_size = TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE;
		rec_seq = gcm_256_info->rec_seq;
		keysize = TLS_CIPHER_ARIA_GCM_256_KEY_SIZE;
		key = gcm_256_info->key;
		salt = gcm_256_info->salt;
		salt_size = TLS_CIPHER_ARIA_GCM_256_SALT_SIZE;
		cipher_name = "gcm(aria)";
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}
	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
	    rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
	    prot->aad_size > TLS_MAX_AAD_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}
	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size +
			      prot->tag_size + prot->tail_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	/* Note: 128 & 256 bit salt are the same size */
	prot->rec_seq_size = rec_seq_size;
	memcpy(cctx->iv, salt, salt_size);
	memcpy(cctx->iv + salt_size, iv, iv_size);
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
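
	/* Illustrative sketch, not part of the original file: cctx->iv now
	 * holds salt||iv back to back, the layout the record layer uses to
	 * derive a per-record nonce.  For TLS 1.3 (and ChaCha20-Poly1305)
	 * the 64-bit record sequence number is XORed into the low-order
	 * bytes, roughly (the demo_* name is hypothetical):
	 */
#if 0
	static void demo_xor_seq_into_nonce(u8 *nonce, int nonce_len, u64 seq)
	{
		int i, off = nonce_len - 8;

		/* XOR the big-endian sequence number over the last 8 bytes */
		for (i = 0; i < 8; i++)
			nonce[off + i] ^= (u8)(seq >> (56 - 8 * i));
	}
#endif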
	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, keysize);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;
	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	goto out;
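
	/* Illustrative sketch, not part of the original file: async_capable
	 * above is derived from the CRYPTO_ALG_ASYNC flag of the algorithm
	 * actually bound to the tfm.  The same test written via the public
	 * accessor would read (the demo_* name is hypothetical):
	 */
#if 0
	static bool demo_aead_is_async(struct crypto_aead *aead)
	{
		return !!(crypto_aead_alg(aead)->base.cra_flags &
			  CRYPTO_ALG_ASYNC);
	}
#endif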
free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}
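
/* Illustrative sketch, not part of the original file: tls_set_sw_offload()
 * is reached from userspace through the TCP_ULP + SOL_TLS setsockopt
 * sequence described in Documentation/networking/tls.rst.  A minimal
 * user-space caller for AES-GCM-128 Tx offload looks roughly like this
 * (the demo_* name is hypothetical; key material must come from a
 * completed TLS handshake):
 */
#if 0
#include <linux/tls.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int demo_enable_ktls_tx(int fd,
			       const struct tls12_crypto_info_aes_gcm_128 *ci)
{
	/* Attach the TLS ULP first, then install the Tx key material */
	if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(fd, SOL_TLS, TLS_TX, ci, sizeof(*ci));
}
#endif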