2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/splice.h>
42 #include <crypto/aead.h>
44 #include <net/strparser.h>
47 noinline void tls_err_abort(struct sock *sk, int err)
49 WARN_ON_ONCE(err >= 0);
50 /* sk->sk_err should contain a positive error code. */
52 sk->sk_error_report(sk);
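/* Count the scatterlist entries needed to map @len bytes of @skb
 * starting at @offset: head (linear) data, page frags, and any
 * skbs hanging off the frag_list, recursing with a depth limit.
 */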
55 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
56 unsigned int recursion_level)
58 int start = skb_headlen(skb);
59 int i, chunk = start - offset;
60 struct sk_buff *frag_iter;
63 if (unlikely(recursion_level >= 24))
76 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
79 WARN_ON(start > offset + len);
81 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
95 if (unlikely(skb_has_frag_list(skb))) {
96 skb_walk_frags(skb, frag_iter) {
99 WARN_ON(start > offset + len);
101 end = start + frag_iter->len;
102 chunk = end - offset;
106 ret = __skb_nsg(frag_iter, offset - start, chunk,
107 recursion_level + 1);
108 if (unlikely(ret < 0))
123 /* Return the number of scatterlist elements required to completely map the
124 * skb, or -EMSGSIZE if the recursion depth is exceeded.
126 static int skb_nsg(struct sk_buff *skb, int offset, int len)
128 return __skb_nsg(skb, offset, len, 0);
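/* TLS 1.3 appends the real content type after the payload and pads it
 * with zeros. Scan the decrypted record backwards for the first
 * non-zero byte, remember it in ctx->control and return the number of
 * zero padding bytes so the caller can trim them.
 */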
131 static int padding_length(struct tls_sw_context_rx *ctx,
132 struct tls_prot_info *prot, struct sk_buff *skb)
134 struct strp_msg *rxm = strp_msg(skb);
137 /* Determine zero-padding length */
138 if (prot->version == TLS_1_3_VERSION) {
139 char content_type = 0;
143 while (content_type == 0) {
144 if (back > rxm->full_len - prot->prepend_size)
146 err = skb_copy_bits(skb,
147 rxm->offset + rxm->full_len - back,
156 ctx->control = content_type;
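/* Completion callback for asynchronous AEAD decryption. Propagate any
 * error to the socket; otherwise strip the TLS 1.3 padding and the
 * record header from the message, drop the pages used as a zero-copy
 * destination, and complete the waiter once the last pending decrypt
 * has finished.
 */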
161 static void tls_decrypt_done(struct crypto_async_request *req, int err)
163 struct aead_request *aead_req = (struct aead_request *)req;
164 struct scatterlist *sgout = aead_req->dst;
165 struct scatterlist *sgin = aead_req->src;
166 struct tls_sw_context_rx *ctx;
167 struct tls_context *tls_ctx;
168 struct tls_prot_info *prot;
169 struct scatterlist *sg;
174 skb = (struct sk_buff *)req->data;
175 tls_ctx = tls_get_ctx(skb->sk);
176 ctx = tls_sw_ctx_rx(tls_ctx);
177 prot = &tls_ctx->prot_info;
179 /* Propagate if there was an err */
181 ctx->async_wait.err = err;
182 tls_err_abort(skb->sk, err);
184 struct strp_msg *rxm = strp_msg(skb);
187 pad = padding_length(ctx, prot, skb);
189 ctx->async_wait.err = pad;
190 tls_err_abort(skb->sk, pad);
192 rxm->full_len -= pad;
193 rxm->offset += prot->prepend_size;
194 rxm->full_len -= prot->overhead_size;
198 /* After using skb->sk to propagate sk through crypto async callback
199 * we need to NULL it again.
204 /* Free the destination pages if skb was not decrypted in place */
206 /* Skip the first S/G entry as it points to AAD */
207 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
210 put_page(sg_page(sg));
216 spin_lock_bh(&ctx->decrypt_compl_lock);
217 pending = atomic_dec_return(&ctx->decrypt_pending);
219 if (!pending && ctx->async_notify)
220 complete(&ctx->async_wait.completion);
221 spin_unlock_bh(&ctx->decrypt_compl_lock);
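/* Submit one AEAD decrypt request for a parsed record. In async mode
 * the request completes in tls_decrypt_done() and -EINPROGRESS is
 * returned to the caller; otherwise wait for the crypto layer and
 * return its result.
 */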
224 static int tls_do_decryption(struct sock *sk,
226 struct scatterlist *sgin,
227 struct scatterlist *sgout,
230 struct aead_request *aead_req,
233 struct tls_context *tls_ctx = tls_get_ctx(sk);
234 struct tls_prot_info *prot = &tls_ctx->prot_info;
235 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
238 aead_request_set_tfm(aead_req, ctx->aead_recv);
239 aead_request_set_ad(aead_req, prot->aad_size);
240 aead_request_set_crypt(aead_req, sgin, sgout,
241 data_len + prot->tag_size,
245 /* Using skb->sk to push sk through to crypto async callback
246 * handler. This allows propagating errors up to the socket
247 * if needed. It _must_ be cleared in the async handler
248 * before consume_skb is called. We _know_ skb->sk is NULL
249 * because it is a clone from strparser.
252 aead_request_set_callback(aead_req,
253 CRYPTO_TFM_REQ_MAY_BACKLOG,
254 tls_decrypt_done, skb);
255 atomic_inc(&ctx->decrypt_pending);
257 aead_request_set_callback(aead_req,
258 CRYPTO_TFM_REQ_MAY_BACKLOG,
259 crypto_req_done, &ctx->async_wait);
262 ret = crypto_aead_decrypt(aead_req);
263 if (ret == -EINPROGRESS) {
267 ret = crypto_wait_req(ret, &ctx->async_wait);
271 atomic_dec(&ctx->decrypt_pending);
276 static void tls_trim_both_msgs(struct sock *sk, int target_size)
278 struct tls_context *tls_ctx = tls_get_ctx(sk);
279 struct tls_prot_info *prot = &tls_ctx->prot_info;
280 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
281 struct tls_rec *rec = ctx->open_rec;
283 sk_msg_trim(sk, &rec->msg_plaintext, target_size);
285 target_size += prot->overhead_size;
286 sk_msg_trim(sk, &rec->msg_encrypted, target_size);
289 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
291 struct tls_context *tls_ctx = tls_get_ctx(sk);
292 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
293 struct tls_rec *rec = ctx->open_rec;
294 struct sk_msg *msg_en = &rec->msg_encrypted;
296 return sk_msg_alloc(sk, msg_en, len, 0);
299 static int tls_clone_plaintext_msg(struct sock *sk, int required)
301 struct tls_context *tls_ctx = tls_get_ctx(sk);
302 struct tls_prot_info *prot = &tls_ctx->prot_info;
303 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
304 struct tls_rec *rec = ctx->open_rec;
305 struct sk_msg *msg_pl = &rec->msg_plaintext;
306 struct sk_msg *msg_en = &rec->msg_encrypted;
309 /* We add page references worth len bytes from the encrypted sg
310 * at the end of the plaintext sg. It is guaranteed that msg_en
311 * has the required room (ensured by the caller).
313 len = required - msg_pl->sg.size;
315 /* Skip initial bytes in msg_en's data to be able to use
316 * the same offset for both plain and encrypted data.
318 skip = prot->prepend_size + msg_pl->sg.size;
320 return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
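/* Allocate a new TX record together with its AEAD request and set up
 * the two-entry scatterlists that chain the AAD buffer in front of
 * the plaintext and ciphertext data.
 */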
323 static struct tls_rec *tls_get_rec(struct sock *sk)
325 struct tls_context *tls_ctx = tls_get_ctx(sk);
326 struct tls_prot_info *prot = &tls_ctx->prot_info;
327 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
328 struct sk_msg *msg_pl, *msg_en;
332 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
334 rec = kzalloc(mem_size, sk->sk_allocation);
338 msg_pl = &rec->msg_plaintext;
339 msg_en = &rec->msg_encrypted;
344 sg_init_table(rec->sg_aead_in, 2);
345 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
346 sg_unmark_end(&rec->sg_aead_in[1]);
348 sg_init_table(rec->sg_aead_out, 2);
349 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
350 sg_unmark_end(&rec->sg_aead_out[1]);
355 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
357 sk_msg_free(sk, &rec->msg_encrypted);
358 sk_msg_free(sk, &rec->msg_plaintext);
362 static void tls_free_open_rec(struct sock *sk)
364 struct tls_context *tls_ctx = tls_get_ctx(sk);
365 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
366 struct tls_rec *rec = ctx->open_rec;
369 tls_free_rec(sk, rec);
370 ctx->open_rec = NULL;
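/* Push encrypted records out to the TCP layer: first finish any
 * partially transmitted record at the head of tx_list, then transmit
 * every record already marked tx_ready, freeing records as they are
 * fully sent.
 */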
374 int tls_tx_records(struct sock *sk, int flags)
376 struct tls_context *tls_ctx = tls_get_ctx(sk);
377 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
378 struct tls_rec *rec, *tmp;
379 struct sk_msg *msg_en;
380 int tx_flags, rc = 0;
382 if (tls_is_partially_sent_record(tls_ctx)) {
383 rec = list_first_entry(&ctx->tx_list,
384 struct tls_rec, list);
387 tx_flags = rec->tx_flags;
391 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
395 /* Full record has been transmitted.
396 * Remove the head of tx_list
398 list_del(&rec->list);
399 sk_msg_free(sk, &rec->msg_plaintext);
403 /* Tx all ready records */
404 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
405 if (READ_ONCE(rec->tx_ready)) {
407 tx_flags = rec->tx_flags;
411 msg_en = &rec->msg_encrypted;
412 rc = tls_push_sg(sk, tls_ctx,
413 &msg_en->sg.data[msg_en->sg.curr],
418 list_del(&rec->list);
419 sk_msg_free(sk, &rec->msg_plaintext);
427 if (rc < 0 && rc != -EAGAIN)
428 tls_err_abort(sk, -EBADMSG);
433 static void tls_encrypt_done(struct crypto_async_request *req, int err)
435 struct aead_request *aead_req = (struct aead_request *)req;
436 struct sock *sk = req->data;
437 struct tls_context *tls_ctx = tls_get_ctx(sk);
438 struct tls_prot_info *prot = &tls_ctx->prot_info;
439 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
440 struct scatterlist *sge;
441 struct sk_msg *msg_en;
446 rec = container_of(aead_req, struct tls_rec, aead_req);
447 msg_en = &rec->msg_encrypted;
449 sge = sk_msg_elem(msg_en, msg_en->sg.curr);
450 sge->offset -= prot->prepend_size;
451 sge->length += prot->prepend_size;
453 /* Check if an error was previously set on the socket */
454 if (err || sk->sk_err) {
457 /* If err is already set on socket, return the same code */
459 ctx->async_wait.err = -sk->sk_err;
461 ctx->async_wait.err = err;
462 tls_err_abort(sk, err);
467 struct tls_rec *first_rec;
469 /* Mark the record as ready for transmission */
470 smp_store_mb(rec->tx_ready, true);
472 /* If received record is at head of tx_list, schedule tx */
473 first_rec = list_first_entry(&ctx->tx_list,
474 struct tls_rec, list);
475 if (rec == first_rec)
479 spin_lock_bh(&ctx->encrypt_compl_lock);
480 pending = atomic_dec_return(&ctx->encrypt_pending);
482 if (!pending && ctx->async_notify)
483 complete(&ctx->async_wait.completion);
484 spin_unlock_bh(&ctx->encrypt_compl_lock);
489 /* Schedule the transmission */
490 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
491 schedule_delayed_work(&ctx->tx_work.work, 1);
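/* Build the per-record nonce (salt/IV xor'ed with the record sequence
 * number, with the CCM B0 byte where required), queue the record on
 * tx_list and submit the AEAD encrypt request. On synchronous
 * completion the record is marked tx_ready and the TX record sequence
 * number is advanced.
 */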
494 static int tls_do_encryption(struct sock *sk,
495 struct tls_context *tls_ctx,
496 struct tls_sw_context_tx *ctx,
497 struct aead_request *aead_req,
498 size_t data_len, u32 start)
500 struct tls_prot_info *prot = &tls_ctx->prot_info;
501 struct tls_rec *rec = ctx->open_rec;
502 struct sk_msg *msg_en = &rec->msg_encrypted;
503 struct scatterlist *sge = sk_msg_elem(msg_en, start);
504 int rc, iv_offset = 0;
506 /* For CCM based ciphers, first byte of IV is a constant */
507 if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
508 rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
512 memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
513 prot->iv_size + prot->salt_size);
515 xor_iv_with_seq(prot->version, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
517 sge->offset += prot->prepend_size;
518 sge->length -= prot->prepend_size;
520 msg_en->sg.curr = start;
522 aead_request_set_tfm(aead_req, ctx->aead_send);
523 aead_request_set_ad(aead_req, prot->aad_size);
524 aead_request_set_crypt(aead_req, rec->sg_aead_in,
526 data_len, rec->iv_data);
528 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
529 tls_encrypt_done, sk);
531 /* Add the record to tx_list */
532 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
533 atomic_inc(&ctx->encrypt_pending);
535 rc = crypto_aead_encrypt(aead_req);
536 if (!rc || rc != -EINPROGRESS) {
537 atomic_dec(&ctx->encrypt_pending);
538 sge->offset -= prot->prepend_size;
539 sge->length += prot->prepend_size;
543 WRITE_ONCE(rec->tx_ready, true);
544 } else if (rc != -EINPROGRESS) {
545 list_del(&rec->list);
549 /* Unhook the record from the context if encryption did not fail */
550 ctx->open_rec = NULL;
551 tls_advance_record_sn(sk, prot, &tls_ctx->tx);
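/* Split the open record at @split_point (the apply_bytes boundary):
 * move the plaintext scatterlist entries beyond that point into a
 * freshly allocated record and fix up sizes and cursors on both the
 * old and the new record.
 */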
555 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
556 struct tls_rec **to, struct sk_msg *msg_opl,
557 struct sk_msg *msg_oen, u32 split_point,
558 u32 tx_overhead_size, u32 *orig_end)
560 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
561 struct scatterlist *sge, *osge, *nsge;
562 u32 orig_size = msg_opl->sg.size;
563 struct scatterlist tmp = { };
564 struct sk_msg *msg_npl;
568 new = tls_get_rec(sk);
571 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
572 tx_overhead_size, 0);
574 tls_free_rec(sk, new);
578 *orig_end = msg_opl->sg.end;
579 i = msg_opl->sg.start;
580 sge = sk_msg_elem(msg_opl, i);
581 while (apply && sge->length) {
582 if (sge->length > apply) {
583 u32 len = sge->length - apply;
585 get_page(sg_page(sge));
586 sg_set_page(&tmp, sg_page(sge), len,
587 sge->offset + apply);
592 apply -= sge->length;
593 bytes += sge->length;
596 sk_msg_iter_var_next(i);
597 if (i == msg_opl->sg.end)
599 sge = sk_msg_elem(msg_opl, i);
603 msg_opl->sg.curr = i;
604 msg_opl->sg.copybreak = 0;
605 msg_opl->apply_bytes = 0;
606 msg_opl->sg.size = bytes;
608 msg_npl = &new->msg_plaintext;
609 msg_npl->apply_bytes = apply;
610 msg_npl->sg.size = orig_size - bytes;
612 j = msg_npl->sg.start;
613 nsge = sk_msg_elem(msg_npl, j);
615 memcpy(nsge, &tmp, sizeof(*nsge));
616 sk_msg_iter_var_next(j);
617 nsge = sk_msg_elem(msg_npl, j);
620 osge = sk_msg_elem(msg_opl, i);
621 while (osge->length) {
622 memcpy(nsge, osge, sizeof(*nsge));
624 sk_msg_iter_var_next(i);
625 sk_msg_iter_var_next(j);
628 osge = sk_msg_elem(msg_opl, i);
629 nsge = sk_msg_elem(msg_npl, j);
633 msg_npl->sg.curr = j;
634 msg_npl->sg.copybreak = 0;
640 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
641 struct tls_rec *from, u32 orig_end)
643 struct sk_msg *msg_npl = &from->msg_plaintext;
644 struct sk_msg *msg_opl = &to->msg_plaintext;
645 struct scatterlist *osge, *nsge;
649 sk_msg_iter_var_prev(i);
650 j = msg_npl->sg.start;
652 osge = sk_msg_elem(msg_opl, i);
653 nsge = sk_msg_elem(msg_npl, j);
655 if (sg_page(osge) == sg_page(nsge) &&
656 osge->offset + osge->length == nsge->offset) {
657 osge->length += nsge->length;
658 put_page(sg_page(nsge));
661 msg_opl->sg.end = orig_end;
662 msg_opl->sg.curr = orig_end;
663 msg_opl->sg.copybreak = 0;
664 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
665 msg_opl->sg.size += msg_npl->sg.size;
667 sk_msg_free(sk, &to->msg_encrypted);
668 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
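/* Close the currently open record: split it first if apply_bytes ends
 * inside it, chain the AAD in front of the plaintext and ciphertext
 * scatterlists, append the TLS 1.3 content type byte where needed,
 * write the record header and hand the record to tls_do_encryption().
 */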
673 static int tls_push_record(struct sock *sk, int flags,
674 unsigned char record_type)
676 struct tls_context *tls_ctx = tls_get_ctx(sk);
677 struct tls_prot_info *prot = &tls_ctx->prot_info;
678 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
679 struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
680 u32 i, split_point, uninitialized_var(orig_end);
681 struct sk_msg *msg_pl, *msg_en;
682 struct aead_request *req;
689 msg_pl = &rec->msg_plaintext;
690 msg_en = &rec->msg_encrypted;
692 split_point = msg_pl->apply_bytes;
693 split = split_point && split_point < msg_pl->sg.size;
694 if (unlikely((!split &&
696 prot->overhead_size > msg_en->sg.size) ||
699 prot->overhead_size > msg_en->sg.size))) {
701 split_point = msg_en->sg.size;
704 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
705 split_point, prot->overhead_size,
709 /* This can happen if the above tls_split_open_record allocates
710 * a single large encryption buffer instead of two smaller ones.
711 * In this case adjust pointers and continue without encryption. */
714 if (!msg_pl->sg.size) {
715 tls_merge_open_record(sk, rec, tmp, orig_end);
716 msg_pl = &rec->msg_plaintext;
717 msg_en = &rec->msg_encrypted;
720 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
721 prot->overhead_size);
724 rec->tx_flags = flags;
725 req = &rec->aead_req;
728 sk_msg_iter_var_prev(i);
730 rec->content_type = record_type;
731 if (prot->version == TLS_1_3_VERSION) {
732 /* Add content type to end of message. No padding added */
733 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
734 sg_mark_end(&rec->sg_content_type);
735 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
736 &rec->sg_content_type);
738 sg_mark_end(sk_msg_elem(msg_pl, i));
741 if (msg_pl->sg.end < msg_pl->sg.start) {
742 sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
743 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
747 i = msg_pl->sg.start;
748 sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
751 sk_msg_iter_var_prev(i);
752 sg_mark_end(sk_msg_elem(msg_en, i));
754 i = msg_en->sg.start;
755 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
757 tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
758 tls_ctx->tx.rec_seq, prot->rec_seq_size,
759 record_type, prot->version);
761 tls_fill_prepend(tls_ctx,
762 page_address(sg_page(&msg_en->sg.data[i])) +
763 msg_en->sg.data[i].offset,
764 msg_pl->sg.size + prot->tail_size,
765 record_type, prot->version);
767 tls_ctx->pending_open_record_frags = false;
769 rc = tls_do_encryption(sk, tls_ctx, ctx, req,
770 msg_pl->sg.size + prot->tail_size, i);
772 if (rc != -EINPROGRESS) {
773 tls_err_abort(sk, -EBADMSG);
775 tls_ctx->pending_open_record_frags = true;
776 tls_merge_open_record(sk, rec, tmp, orig_end);
779 ctx->async_capable = 1;
782 msg_pl = &tmp->msg_plaintext;
783 msg_en = &tmp->msg_encrypted;
784 sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
785 tls_ctx->pending_open_record_frags = true;
789 return tls_tx_records(sk, flags);
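/* Run the sockmap/BPF msg verdict on the plaintext of the record (when
 * a psock with a policy is attached) and act on the result: push the
 * record to the TLS transmit path on PASS, redirect the plaintext to
 * another socket on REDIRECT, or drop it on DROP.
 */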
792 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
793 bool full_record, u8 record_type,
794 ssize_t *copied, int flags)
796 struct tls_context *tls_ctx = tls_get_ctx(sk);
797 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
798 struct sk_msg msg_redir = { };
799 struct sk_psock *psock;
800 struct sock *sk_redir;
806 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
807 psock = sk_psock_get(sk);
808 if (!psock || !policy) {
809 err = tls_push_record(sk, flags, record_type);
810 if (err && sk->sk_err == EBADMSG) {
811 *copied -= sk_msg_free(sk, msg);
812 tls_free_open_rec(sk);
816 sk_psock_put(sk, psock);
820 enospc = sk_msg_full(msg);
821 if (psock->eval == __SK_NONE) {
822 delta = msg->sg.size;
823 psock->eval = sk_psock_msg_verdict(sk, psock, msg);
824 delta -= msg->sg.size;
826 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
827 !enospc && !full_record) {
833 if (msg->apply_bytes && msg->apply_bytes < send)
834 send = msg->apply_bytes;
836 switch (psock->eval) {
838 err = tls_push_record(sk, flags, record_type);
839 if (err && sk->sk_err == EBADMSG) {
840 *copied -= sk_msg_free(sk, msg);
841 tls_free_open_rec(sk);
847 sk_redir = psock->sk_redir;
848 memcpy(&msg_redir, msg, sizeof(*msg));
849 if (msg->apply_bytes < send)
850 msg->apply_bytes = 0;
852 msg->apply_bytes -= send;
853 sk_msg_return_zero(sk, msg, send);
854 msg->sg.size -= send;
856 err = tcp_bpf_sendmsg_redir(sk_redir, &msg_redir, send, flags);
859 *copied -= sk_msg_free_nocharge(sk, &msg_redir);
862 if (msg->sg.size == 0)
863 tls_free_open_rec(sk);
867 sk_msg_free_partial(sk, msg, send);
868 if (msg->apply_bytes < send)
869 msg->apply_bytes = 0;
871 msg->apply_bytes -= send;
872 if (msg->sg.size == 0)
873 tls_free_open_rec(sk);
874 *copied -= (send + delta);
879 bool reset_eval = !ctx->open_rec;
883 msg = &rec->msg_plaintext;
884 if (!msg->apply_bytes)
888 psock->eval = __SK_NONE;
889 if (psock->sk_redir) {
890 sock_put(psock->sk_redir);
891 psock->sk_redir = NULL;
898 sk_psock_put(sk, psock);
902 static int tls_sw_push_pending_record(struct sock *sk, int flags)
904 struct tls_context *tls_ctx = tls_get_ctx(sk);
905 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
906 struct tls_rec *rec = ctx->open_rec;
907 struct sk_msg *msg_pl;
913 msg_pl = &rec->msg_plaintext;
914 copied = msg_pl->sg.size;
918 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
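/* sendmsg() for a TLS software TX socket: fill the open record up to
 * TLS_MAX_PAYLOAD_SIZE, using zero-copy from the user iov when the
 * record is complete or the message ends (and encryption is
 * synchronous), otherwise copying into the plaintext buffer, and push
 * the record through bpf_exec_tx_verdict() when it is full or at EOR.
 */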
922 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
924 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
925 struct tls_context *tls_ctx = tls_get_ctx(sk);
926 struct tls_prot_info *prot = &tls_ctx->prot_info;
927 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
928 bool async_capable = ctx->async_capable;
929 unsigned char record_type = TLS_RECORD_TYPE_DATA;
930 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
931 bool eor = !(msg->msg_flags & MSG_MORE);
934 struct sk_msg *msg_pl, *msg_en;
945 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
948 mutex_lock(&tls_ctx->tx_lock);
951 if (unlikely(msg->msg_controllen)) {
952 ret = tls_proccess_cmsg(sk, msg, &record_type);
954 if (ret == -EINPROGRESS)
956 else if (ret != -EAGAIN)
961 while (msg_data_left(msg)) {
970 rec = ctx->open_rec = tls_get_rec(sk);
976 msg_pl = &rec->msg_plaintext;
977 msg_en = &rec->msg_encrypted;
979 orig_size = msg_pl->sg.size;
981 try_to_copy = msg_data_left(msg);
982 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
983 if (try_to_copy >= record_room) {
984 try_to_copy = record_room;
988 required_size = msg_pl->sg.size + try_to_copy +
991 if (!sk_stream_memory_free(sk))
992 goto wait_for_sndbuf;
995 ret = tls_alloc_encrypted_msg(sk, required_size);
998 goto wait_for_memory;
1000 /* Adjust try_to_copy according to the amount that was
1001 * actually allocated. The difference is due
1002 * to the max sg elements limit.
1004 try_to_copy -= required_size - msg_en->sg.size;
1008 if (!is_kvec && (full_record || eor) && !async_capable) {
1009 u32 first = msg_pl->sg.end;
1011 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1012 msg_pl, try_to_copy);
1014 goto fallback_to_reg_send;
1017 copied += try_to_copy;
1019 sk_msg_sg_copy_set(msg_pl, first);
1020 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1021 record_type, &copied,
1024 if (ret == -EINPROGRESS)
1026 else if (ret == -ENOMEM)
1027 goto wait_for_memory;
1028 else if (ctx->open_rec && ret == -ENOSPC)
1030 else if (ret != -EAGAIN)
1035 copied -= try_to_copy;
1036 sk_msg_sg_copy_clear(msg_pl, first);
1037 iov_iter_revert(&msg->msg_iter,
1038 msg_pl->sg.size - orig_size);
1039 fallback_to_reg_send:
1040 sk_msg_trim(sk, msg_pl, orig_size);
1043 required_size = msg_pl->sg.size + try_to_copy;
1045 ret = tls_clone_plaintext_msg(sk, required_size);
1050 /* Adjust try_to_copy according to the amount that was
1051 * actually allocated. The difference is due
1052 * to the max sg elements limit.
1054 try_to_copy -= required_size - msg_pl->sg.size;
1056 sk_msg_trim(sk, msg_en,
1057 msg_pl->sg.size + prot->overhead_size);
1061 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1062 msg_pl, try_to_copy);
1067 /* Open record frags are marked pending only if the copy succeeded;
1068 * otherwise we would trim the sg but not reset the open record frags.
1070 tls_ctx->pending_open_record_frags = true;
1071 copied += try_to_copy;
1072 if (full_record || eor) {
1073 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1074 record_type, &copied,
1077 if (ret == -EINPROGRESS)
1079 else if (ret == -ENOMEM)
1080 goto wait_for_memory;
1081 else if (ret != -EAGAIN) {
1092 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1094 ret = sk_stream_wait_memory(sk, &timeo);
1098 tls_trim_both_msgs(sk, orig_size);
1102 if (ctx->open_rec && msg_en->sg.size < required_size)
1103 goto alloc_encrypted;
1108 } else if (num_zc) {
1109 /* Wait for pending encryptions to complete */
1110 spin_lock_bh(&ctx->encrypt_compl_lock);
1111 ctx->async_notify = true;
1113 pending = atomic_read(&ctx->encrypt_pending);
1114 spin_unlock_bh(&ctx->encrypt_compl_lock);
1116 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1118 reinit_completion(&ctx->async_wait.completion);
1120 /* There can be no concurrent accesses, since we have no
1121 * pending encrypt operations
1123 WRITE_ONCE(ctx->async_notify, false);
1125 if (ctx->async_wait.err) {
1126 ret = ctx->async_wait.err;
1131 /* Transmit if any encryptions have completed */
1132 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1133 cancel_delayed_work(&ctx->tx_work.work);
1134 tls_tx_records(sk, msg->msg_flags);
1138 ret = sk_stream_error(sk, msg->msg_flags, ret);
1141 mutex_unlock(&tls_ctx->tx_lock);
1142 return copied > 0 ? copied : ret;
1145 static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1146 int offset, size_t size, int flags)
1148 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1149 struct tls_context *tls_ctx = tls_get_ctx(sk);
1150 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1151 struct tls_prot_info *prot = &tls_ctx->prot_info;
1152 unsigned char record_type = TLS_RECORD_TYPE_DATA;
1153 struct sk_msg *msg_pl;
1154 struct tls_rec *rec;
1162 eor = !(flags & MSG_SENDPAGE_NOTLAST);
1163 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1165 /* Call the sk_stream functions to manage the sndbuf mem. */
1167 size_t copy, required_size;
1175 rec = ctx->open_rec;
1177 rec = ctx->open_rec = tls_get_rec(sk);
1183 msg_pl = &rec->msg_plaintext;
1185 full_record = false;
1186 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1188 if (copy >= record_room) {
1193 required_size = msg_pl->sg.size + copy + prot->overhead_size;
1195 if (!sk_stream_memory_free(sk))
1196 goto wait_for_sndbuf;
1198 ret = tls_alloc_encrypted_msg(sk, required_size);
1201 goto wait_for_memory;
1203 /* Adjust copy according to the amount that was
1204 * actually allocated. The difference is due
1205 * to the max sg elements limit.
1207 copy -= required_size - msg_pl->sg.size;
1211 sk_msg_page_add(msg_pl, page, copy, offset);
1212 sk_mem_charge(sk, copy);
1218 tls_ctx->pending_open_record_frags = true;
1219 if (full_record || eor || sk_msg_full(msg_pl)) {
1220 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1221 record_type, &copied, flags);
1223 if (ret == -EINPROGRESS)
1225 else if (ret == -ENOMEM)
1226 goto wait_for_memory;
1227 else if (ret != -EAGAIN) {
1236 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1238 ret = sk_stream_wait_memory(sk, &timeo);
1241 tls_trim_both_msgs(sk, msg_pl->sg.size);
1250 /* Transmit if any encryptions have completed */
1251 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1252 cancel_delayed_work(&ctx->tx_work.work);
1253 tls_tx_records(sk, flags);
1257 ret = sk_stream_error(sk, flags, ret);
1258 return copied > 0 ? copied : ret;
1261 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1262 int offset, size_t size, int flags)
1264 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1265 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1266 MSG_NO_SHARED_FRAGS))
1269 return tls_sw_do_sendpage(sk, page, offset, size, flags);
1272 int tls_sw_sendpage(struct sock *sk, struct page *page,
1273 int offset, size_t size, int flags)
1275 struct tls_context *tls_ctx = tls_get_ctx(sk);
1278 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1279 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1282 mutex_lock(&tls_ctx->tx_lock);
1284 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1286 mutex_unlock(&tls_ctx->tx_lock);
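/* Wait until the stream parser has produced a full record
 * (ctx->recv_pkt) or the psock ingress queue has data, honouring
 * socket errors, shutdown, signals and the receive timeout.
 */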
1290 static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
1291 bool nonblock, long timeo, int *err)
1293 struct tls_context *tls_ctx = tls_get_ctx(sk);
1294 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1295 struct sk_buff *skb;
1296 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1298 while (!(skb = ctx->recv_pkt) && sk_psock_queue_empty(psock)) {
1300 *err = sock_error(sk);
1304 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1305 __strp_unpause(&ctx->strp);
1307 return ctx->recv_pkt;
1310 if (sk->sk_shutdown & RCV_SHUTDOWN)
1313 if (sock_flag(sk, SOCK_DONE))
1316 if (nonblock || !timeo) {
1321 add_wait_queue(sk_sleep(sk), &wait);
1322 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1323 sk_wait_event(sk, &timeo,
1324 ctx->recv_pkt != skb ||
1325 !sk_psock_queue_empty(psock),
1327 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1328 remove_wait_queue(sk_sleep(sk), &wait);
1330 /* Handle signals */
1331 if (signal_pending(current)) {
1332 *err = sock_intr_errno(timeo);
1340 static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
1341 int length, int *pages_used,
1342 unsigned int *size_used,
1343 struct scatterlist *to,
1346 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1347 struct page *pages[MAX_SKB_FRAGS];
1348 unsigned int size = *size_used;
1349 ssize_t copied, use;
1352 while (length > 0) {
1354 maxpages = to_max_pages - num_elem;
1355 if (maxpages == 0) {
1359 copied = iov_iter_get_pages(from, pages,
1367 iov_iter_advance(from, copied);
1372 use = min_t(int, copied, PAGE_SIZE - offset);
1374 sg_set_page(&to[num_elem],
1375 pages[i], use, offset);
1376 sg_unmark_end(&to[num_elem]);
1377 /* We do not uncharge memory from this API */
1386 /* Mark the end in the last sg entry if newly added */
1387 if (num_elem > *pages_used)
1388 sg_mark_end(&to[num_elem - 1]);
1391 iov_iter_revert(from, size - *size_used);
1393 *pages_used = num_elem;
1398 /* This function decrypts the input skb into either out_iov, out_sg,
1399 * or the skb's own buffers. The input parameter 'zc' indicates whether
1400 * zero-copy mode should be tried; with zero-copy mode, either
1401 * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
1402 * NULL, then the decryption happens inside the skb buffers themselves,
1403 * i.e. zero-copy gets disabled and 'zc' is updated.
1406 static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1407 struct iov_iter *out_iov,
1408 struct scatterlist *out_sg,
1409 int *chunk, bool *zc, bool async)
1411 struct tls_context *tls_ctx = tls_get_ctx(sk);
1412 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1413 struct tls_prot_info *prot = &tls_ctx->prot_info;
1414 struct strp_msg *rxm = strp_msg(skb);
1415 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1416 struct aead_request *aead_req;
1417 struct sk_buff *unused;
1418 u8 *aad, *iv, *mem = NULL;
1419 struct scatterlist *sgin = NULL;
1420 struct scatterlist *sgout = NULL;
1421 const int data_len = rxm->full_len - prot->overhead_size +
1425 if (*zc && (out_iov || out_sg)) {
1427 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1429 n_sgout = sg_nents(out_sg);
1430 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1431 rxm->full_len - prot->prepend_size);
1435 n_sgin = skb_cow_data(skb, 0, &unused);
1441 /* Increment to accommodate AAD */
1442 n_sgin = n_sgin + 1;
1444 nsg = n_sgin + n_sgout;
1446 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1447 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
1448 mem_size = mem_size + prot->aad_size;
1449 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1451 /* Allocate a single block of memory which contains
1452 * aead_req || sgin[] || sgout[] || aad || iv.
1453 * This order achieves correct alignment for aead_req, sgin, sgout.
1455 mem = kmalloc(mem_size, sk->sk_allocation);
1459 /* Segment the allocated memory */
1460 aead_req = (struct aead_request *)mem;
1461 sgin = (struct scatterlist *)(mem + aead_size);
1462 sgout = sgin + n_sgin;
1463 aad = (u8 *)(sgout + n_sgout);
1464 iv = aad + prot->aad_size;
1466 /* For CCM based ciphers, first byte of nonce+iv is always '2' */
1467 if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
1473 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1474 iv + iv_offset + prot->salt_size,
1480 if (prot->version == TLS_1_3_VERSION)
1481 memcpy(iv + iv_offset, tls_ctx->rx.iv,
1482 prot->iv_size + prot->salt_size);
1484 memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
1486 xor_iv_with_seq(prot->version, iv + iv_offset, tls_ctx->rx.rec_seq);
1489 tls_make_aad(aad, rxm->full_len - prot->overhead_size +
1491 tls_ctx->rx.rec_seq, prot->rec_seq_size,
1492 ctx->control, prot->version);
1495 sg_init_table(sgin, n_sgin);
1496 sg_set_buf(&sgin[0], aad, prot->aad_size);
1497 err = skb_to_sgvec(skb, &sgin[1],
1498 rxm->offset + prot->prepend_size,
1499 rxm->full_len - prot->prepend_size);
1507 sg_init_table(sgout, n_sgout);
1508 sg_set_buf(&sgout[0], aad, prot->aad_size);
1511 err = tls_setup_from_iter(sk, out_iov, data_len,
1512 &pages, chunk, &sgout[1],
1515 goto fallback_to_reg_recv;
1516 } else if (out_sg) {
1517 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1519 goto fallback_to_reg_recv;
1522 fallback_to_reg_recv:
1529 /* Prepare and submit AEAD request */
1530 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1531 data_len, aead_req, async);
1532 if (err == -EINPROGRESS)
1535 /* Release the pages in case iov was mapped to pages */
1536 for (; pages > 0; pages--)
1537 put_page(sg_page(&sgout[pages]));
1543 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1544 struct iov_iter *dest, int *chunk, bool *zc,
1547 struct tls_context *tls_ctx = tls_get_ctx(sk);
1548 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1549 struct tls_prot_info *prot = &tls_ctx->prot_info;
1550 struct strp_msg *rxm = strp_msg(skb);
1553 if (!ctx->decrypted) {
1554 if (tls_ctx->rx_conf == TLS_HW) {
1555 err = tls_device_decrypted(sk, skb);
1560 /* Still not decrypted after tls_device */
1561 if (!ctx->decrypted) {
1562 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
1565 if (err == -EINPROGRESS)
1566 tls_advance_record_sn(sk, prot,
1575 pad = padding_length(ctx, prot, skb);
1579 rxm->full_len -= pad;
1580 rxm->offset += prot->prepend_size;
1581 rxm->full_len -= prot->overhead_size;
1582 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1583 ctx->decrypted = true;
1584 ctx->saved_data_ready(sk);
1592 int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1593 struct scatterlist *sgout)
1598 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
1601 static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1604 struct tls_context *tls_ctx = tls_get_ctx(sk);
1605 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1608 struct strp_msg *rxm = strp_msg(skb);
1610 if (len < rxm->full_len) {
1612 rxm->full_len -= len;
1618 /* Finished with message */
1619 ctx->recv_pkt = NULL;
1620 __strp_unpause(&ctx->strp);
1625 /* This function traverses the rx_list in the TLS receive context and
1626 * copies decrypted records into the buffer provided by the caller when
1627 * zero-copy is not used. Records are removed from the rx_list if this
1628 * is not a peek case and the record has been consumed completely.
1630 static int process_rx_list(struct tls_sw_context_rx *ctx,
1639 struct sk_buff *skb = skb_peek(&ctx->rx_list);
1642 struct tls_msg *tlm;
1645 /* Set the record type in 'control' if caller didn't pass it */
1648 ctrl = tlm->control;
1651 while (skip && skb) {
1652 struct strp_msg *rxm = strp_msg(skb);
1655 /* Cannot process a record of different type */
1656 if (ctrl != tlm->control)
1659 if (skip < rxm->full_len)
1662 skip = skip - rxm->full_len;
1663 skb = skb_peek_next(skb, &ctx->rx_list);
1666 while (len && skb) {
1667 struct sk_buff *next_skb;
1668 struct strp_msg *rxm = strp_msg(skb);
1669 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1673 /* Cannot process a record of different type */
1674 if (ctrl != tlm->control)
1677 /* Set record type if not already done. For a non-data record,
1678 * do not proceed if record type could not be copied.
1681 int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1682 sizeof(ctrl), &ctrl);
1684 if (ctrl != TLS_RECORD_TYPE_DATA) {
1685 if (cerr || msg->msg_flags & MSG_CTRUNC)
1692 if (!zc || (rxm->full_len - skip) > len) {
1693 int err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1700 copied = copied + chunk;
1702 /* Consume the data from the record if this is not a peek case */
1704 rxm->offset = rxm->offset + chunk;
1705 rxm->full_len = rxm->full_len - chunk;
1707 /* Return if there is unconsumed data in the record */
1708 if (rxm->full_len - skip)
1712 /* The remaining skip-bytes must lie within the 1st record in rx_list,
1713 * so from the 2nd record onward 'skip' should be 0.
1718 msg->msg_flags |= MSG_EOR;
1720 next_skb = skb_peek_next(skb, &ctx->rx_list);
1723 skb_unlink(skb, &ctx->rx_list);
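/* recvmsg() for a TLS software RX socket: drain already-decrypted
 * records from rx_list first, then decrypt newly parsed records
 * (zero-copy into the user iov when possible), falling back to the
 * psock ingress queue, and queue skbs back on rx_list for peek or
 * asynchronous decryption.
 */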
1734 int tls_sw_recvmsg(struct sock *sk,
1741 struct tls_context *tls_ctx = tls_get_ctx(sk);
1742 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1743 struct tls_prot_info *prot = &tls_ctx->prot_info;
1744 struct sk_psock *psock;
1745 unsigned char control = 0;
1746 ssize_t decrypted = 0;
1747 struct strp_msg *rxm;
1748 struct tls_msg *tlm;
1749 struct sk_buff *skb;
1752 int target, err = 0;
1754 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1755 bool is_peek = flags & MSG_PEEK;
1756 bool bpf_strp_enabled;
1762 if (unlikely(flags & MSG_ERRQUEUE))
1763 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1765 psock = sk_psock_get(sk);
1767 bpf_strp_enabled = sk_psock_strp_enabled(psock);
1769 /* Process pending decrypted records. This must be done without zero-copy */
1770 err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
1773 tls_err_abort(sk, err);
1782 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1784 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1786 while (len && (decrypted + copied < target || ctx->recv_pkt)) {
1787 bool retain_skb = false;
1794 skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err);
1797 int ret = __tcp_bpf_recvmsg(sk, psock,
1809 if (prot->version == TLS_1_3_VERSION)
1812 tlm->control = ctx->control;
1815 rxm = strp_msg(skb);
1817 to_decrypt = rxm->full_len - prot->overhead_size;
1819 if (to_decrypt <= len && !is_kvec && !is_peek &&
1820 ctx->control == TLS_RECORD_TYPE_DATA &&
1821 prot->version != TLS_1_3_VERSION &&
1825 /* Do not use async mode if record is non-data */
1826 if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
1827 async_capable = ctx->async_capable;
1829 async_capable = false;
1831 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
1832 &chunk, &zc, async_capable);
1833 if (err < 0 && err != -EINPROGRESS) {
1834 tls_err_abort(sk, -EBADMSG);
1838 if (err == -EINPROGRESS) {
1841 } else if (prot->version == TLS_1_3_VERSION) {
1842 tlm->control = ctx->control;
1845 /* If the type of records being processed is not known yet,
1846 * set it to the record type just dequeued. If it is already known,
1847 * but does not match the record type just dequeued, go to end.
1848 * We always get the record type here since for TLS 1.2 the record type
1849 * is known right after the record is dequeued from the stream parser.
1850 * For TLS 1.3, we disable async.
1854 control = tlm->control;
1855 else if (control != tlm->control)
1861 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1862 sizeof(control), &control);
1864 if (control != TLS_RECORD_TYPE_DATA) {
1865 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1873 goto pick_next_record;
1876 if (bpf_strp_enabled) {
1877 err = sk_psock_tls_strp_read(psock, skb);
1878 if (err != __SK_PASS) {
1879 rxm->offset = rxm->offset + rxm->full_len;
1881 if (err == __SK_DROP)
1883 ctx->recv_pkt = NULL;
1884 __strp_unpause(&ctx->strp);
1889 if (rxm->full_len > len) {
1893 chunk = rxm->full_len;
1896 err = skb_copy_datagram_msg(skb, rxm->offset,
1902 rxm->offset = rxm->offset + chunk;
1903 rxm->full_len = rxm->full_len - chunk;
1914 /* For async or peek case, queue the current skb */
1915 if (async || is_peek || retain_skb) {
1916 skb_queue_tail(&ctx->rx_list, skb);
1920 if (tls_sw_advance_skb(sk, skb, chunk)) {
1921 /* Return full control message to
1922 * userspace before trying to parse
1923 * another message type
1925 msg->msg_flags |= MSG_EOR;
1926 if (control != TLS_RECORD_TYPE_DATA)
1935 /* Wait for all previously submitted records to be decrypted */
1936 spin_lock_bh(&ctx->decrypt_compl_lock);
1937 ctx->async_notify = true;
1938 pending = atomic_read(&ctx->decrypt_pending);
1939 spin_unlock_bh(&ctx->decrypt_compl_lock);
1941 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1943 /* one of the async decrypts failed */
1944 tls_err_abort(sk, err);
1950 reinit_completion(&ctx->async_wait.completion);
1953 /* There can be no concurrent accesses, since we have no
1954 * pending decrypt operations
1956 WRITE_ONCE(ctx->async_notify, false);
1958 /* Drain records from the rx_list & copy if required */
1959 if (is_peek || is_kvec)
1960 err = process_rx_list(ctx, msg, &control, &cmsg, copied,
1961 decrypted, false, is_peek);
1963 err = process_rx_list(ctx, msg, &control, &cmsg, 0,
1964 decrypted, true, is_peek);
1966 tls_err_abort(sk, err);
1972 copied += decrypted;
1977 sk_psock_put(sk, psock);
1978 return copied ? : err;
1981 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1982 struct pipe_inode_info *pipe,
1983 size_t len, unsigned int flags)
1985 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1986 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1987 struct strp_msg *rxm = NULL;
1988 struct sock *sk = sock->sk;
1989 struct sk_buff *skb;
1998 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
2000 skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
2002 goto splice_read_end;
2004 if (!ctx->decrypted) {
2005 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
2007 /* splice does not support reading control messages */
2008 if (ctx->control != TLS_RECORD_TYPE_DATA) {
2010 goto splice_read_end;
2014 tls_err_abort(sk, -EBADMSG);
2015 goto splice_read_end;
2017 ctx->decrypted = true;
2019 rxm = strp_msg(skb);
2021 chunk = min_t(unsigned int, rxm->full_len, len);
2022 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2024 goto splice_read_end;
2026 if (likely(!(flags & MSG_PEEK)))
2027 tls_sw_advance_skb(sk, skb, copied);
2031 return copied ? : err;
2034 bool tls_sw_stream_read(const struct sock *sk)
2036 struct tls_context *tls_ctx = tls_get_ctx(sk);
2037 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2038 bool ingress_empty = true;
2039 struct sk_psock *psock;
2042 psock = sk_psock(sk);
2044 ingress_empty = list_empty(&psock->ingress_msg);
2047 return !ingress_empty || ctx->recv_pkt ||
2048 !skb_queue_empty(&ctx->rx_list);
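/* strparser msg-size callback: parse the 5-byte TLS record header,
 * validate the version and length fields and return the length of the
 * complete record (header included); if the full header has not
 * arrived yet, more data is awaited.
 */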
2051 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2053 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2054 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2055 struct tls_prot_info *prot = &tls_ctx->prot_info;
2056 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2057 struct strp_msg *rxm = strp_msg(skb);
2058 size_t cipher_overhead;
2059 size_t data_len = 0;
2062 /* Verify that we have a full TLS header, or wait for more data */
2063 if (rxm->offset + prot->prepend_size > skb->len)
2066 /* Sanity-check size of on-stack buffer. */
2067 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2072 /* Linearize header to local buffer */
2073 ret = skb_copy_bits(skb, rxm->offset, header, prot->prepend_size);
2078 ctx->control = header[0];
2080 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2082 cipher_overhead = prot->tag_size;
2083 if (prot->version != TLS_1_3_VERSION)
2084 cipher_overhead += prot->iv_size;
2086 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2091 if (data_len < cipher_overhead) {
2096 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2097 if (header[1] != TLS_1_2_VERSION_MINOR ||
2098 header[2] != TLS_1_2_VERSION_MAJOR) {
2103 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2104 TCP_SKB_CB(skb)->seq + rxm->offset);
2105 return data_len + TLS_HEADER_SIZE;
2108 tls_err_abort(strp->sk, ret);
2113 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
2115 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2116 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2118 ctx->decrypted = false;
2120 ctx->recv_pkt = skb;
2123 ctx->saved_data_ready(strp->sk);
2126 static void tls_data_ready(struct sock *sk)
2128 struct tls_context *tls_ctx = tls_get_ctx(sk);
2129 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2130 struct sk_psock *psock;
2132 strp_data_ready(&ctx->strp);
2134 psock = sk_psock_get(sk);
2136 if (!list_empty(&psock->ingress_msg))
2137 ctx->saved_data_ready(sk);
2138 sk_psock_put(sk, psock);
2142 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2144 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2146 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2147 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2148 cancel_delayed_work_sync(&ctx->tx_work.work);
2151 void tls_sw_release_resources_tx(struct sock *sk)
2153 struct tls_context *tls_ctx = tls_get_ctx(sk);
2154 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2155 struct tls_rec *rec, *tmp;
2158 /* Wait for any pending async encryptions to complete */
2159 spin_lock_bh(&ctx->encrypt_compl_lock);
2160 ctx->async_notify = true;
2161 pending = atomic_read(&ctx->encrypt_pending);
2162 spin_unlock_bh(&ctx->encrypt_compl_lock);
2165 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2167 tls_tx_records(sk, -1);
2169 /* Free up unsent records in tx_list. First, free
2170 * the partially sent record, if any, at the head of tx_list.
2172 if (tls_ctx->partially_sent_record) {
2173 tls_free_partial_record(sk, tls_ctx);
2174 rec = list_first_entry(&ctx->tx_list,
2175 struct tls_rec, list);
2176 list_del(&rec->list);
2177 sk_msg_free(sk, &rec->msg_plaintext);
2181 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2182 list_del(&rec->list);
2183 sk_msg_free(sk, &rec->msg_encrypted);
2184 sk_msg_free(sk, &rec->msg_plaintext);
2188 crypto_free_aead(ctx->aead_send);
2189 tls_free_open_rec(sk);
2192 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2194 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2199 void tls_sw_release_resources_rx(struct sock *sk)
2201 struct tls_context *tls_ctx = tls_get_ctx(sk);
2202 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2204 kfree(tls_ctx->rx.rec_seq);
2205 kfree(tls_ctx->rx.iv);
2207 if (ctx->aead_recv) {
2208 kfree_skb(ctx->recv_pkt);
2209 ctx->recv_pkt = NULL;
2210 skb_queue_purge(&ctx->rx_list);
2211 crypto_free_aead(ctx->aead_recv);
2212 strp_stop(&ctx->strp);
2213 /* If tls_sw_strparser_arm() was not called (cleanup paths),
2214 * we still want to strp_stop(), but sk->sk_data_ready was never swapped. */
2217 if (ctx->saved_data_ready) {
2218 write_lock_bh(&sk->sk_callback_lock);
2219 sk->sk_data_ready = ctx->saved_data_ready;
2220 write_unlock_bh(&sk->sk_callback_lock);
2225 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2227 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2229 strp_done(&ctx->strp);
2232 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2234 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2239 void tls_sw_free_resources_rx(struct sock *sk)
2241 struct tls_context *tls_ctx = tls_get_ctx(sk);
2243 tls_sw_release_resources_rx(sk);
2244 tls_sw_free_ctx_rx(tls_ctx);
2247 /* The work handler to transmit the encrypted records in tx_list */
2248 static void tx_work_handler(struct work_struct *work)
2250 struct delayed_work *delayed_work = to_delayed_work(work);
2251 struct tx_work *tx_work = container_of(delayed_work,
2252 struct tx_work, work);
2253 struct sock *sk = tx_work->sk;
2254 struct tls_context *tls_ctx = tls_get_ctx(sk);
2255 struct tls_sw_context_tx *ctx;
2257 if (unlikely(!tls_ctx))
2260 ctx = tls_sw_ctx_tx(tls_ctx);
2261 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2264 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2266 mutex_lock(&tls_ctx->tx_lock);
2268 tls_tx_records(sk, -1);
2270 mutex_unlock(&tls_ctx->tx_lock);
2273 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2275 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2277 /* Schedule the transmission if tx list is ready */
2278 if (is_tx_ready(tx_ctx) &&
2279 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2280 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2283 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2285 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2287 write_lock_bh(&sk->sk_callback_lock);
2288 rx_ctx->saved_data_ready = sk->sk_data_ready;
2289 sk->sk_data_ready = tls_data_ready;
2290 write_unlock_bh(&sk->sk_callback_lock);
2292 strp_check_rcv(&rx_ctx->strp);
2295 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2297 struct tls_context *tls_ctx = tls_get_ctx(sk);
2298 struct tls_prot_info *prot = &tls_ctx->prot_info;
2299 struct tls_crypto_info *crypto_info;
2300 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2301 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2302 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2303 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2304 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2305 struct cipher_context *cctx;
2306 struct crypto_aead **aead;
2307 struct strp_callbacks cb;
2308 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2309 struct crypto_tfm *tfm;
2310 char *iv, *rec_seq, *key, *salt, *cipher_name;
2320 if (!ctx->priv_ctx_tx) {
2321 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2326 ctx->priv_ctx_tx = sw_ctx_tx;
2329 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2332 if (!ctx->priv_ctx_rx) {
2333 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2338 ctx->priv_ctx_rx = sw_ctx_rx;
2341 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2346 crypto_init_wait(&sw_ctx_tx->async_wait);
2347 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2348 crypto_info = &ctx->crypto_send.info;
2350 aead = &sw_ctx_tx->aead_send;
2351 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2352 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2353 sw_ctx_tx->tx_work.sk = sk;
2355 crypto_init_wait(&sw_ctx_rx->async_wait);
2356 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2357 crypto_info = &ctx->crypto_recv.info;
2359 skb_queue_head_init(&sw_ctx_rx->rx_list);
2360 aead = &sw_ctx_rx->aead_recv;
2363 switch (crypto_info->cipher_type) {
2364 case TLS_CIPHER_AES_GCM_128: {
2365 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2366 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2367 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2368 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
2369 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2371 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
2373 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
2374 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2375 key = gcm_128_info->key;
2376 salt = gcm_128_info->salt;
2377 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2378 cipher_name = "gcm(aes)";
2381 case TLS_CIPHER_AES_GCM_256: {
2382 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2383 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2384 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2385 iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
2386 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2388 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
2390 (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
2391 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2392 key = gcm_256_info->key;
2393 salt = gcm_256_info->salt;
2394 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2395 cipher_name = "gcm(aes)";
2398 case TLS_CIPHER_AES_CCM_128: {
2399 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2400 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2401 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2402 iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
2403 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2405 ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
2407 (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
2408 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2409 key = ccm_128_info->key;
2410 salt = ccm_128_info->salt;
2411 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2412 cipher_name = "ccm(aes)";
2420 /* Sanity-check the sizes for stack allocations. */
2421 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2422 rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
2427 if (crypto_info->version == TLS_1_3_VERSION) {
2429 prot->aad_size = TLS_HEADER_SIZE;
2430 prot->tail_size = 1;
2432 prot->aad_size = TLS_AAD_SPACE_SIZE;
2433 prot->tail_size = 0;
2436 prot->version = crypto_info->version;
2437 prot->cipher_type = crypto_info->cipher_type;
2438 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2439 prot->tag_size = tag_size;
2440 prot->overhead_size = prot->prepend_size +
2441 prot->tag_size + prot->tail_size;
2442 prot->iv_size = iv_size;
2443 prot->salt_size = salt_size;
2444 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2449 /* Note: 128 & 256 bit salt are the same size */
2450 prot->rec_seq_size = rec_seq_size;
2451 memcpy(cctx->iv, salt, salt_size);
2452 memcpy(cctx->iv + salt_size, iv, iv_size);
2453 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2454 if (!cctx->rec_seq) {
2460 *aead = crypto_alloc_aead(cipher_name, 0, 0);
2461 if (IS_ERR(*aead)) {
2462 rc = PTR_ERR(*aead);
2468 ctx->push_pending_record = tls_sw_push_pending_record;
2470 rc = crypto_aead_setkey(*aead, key, keysize);
2475 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2480 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2482 if (crypto_info->version == TLS_1_3_VERSION)
2483 sw_ctx_rx->async_capable = false;
2485 sw_ctx_rx->async_capable =
2486 tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
2488 /* Set up strparser */
2489 memset(&cb, 0, sizeof(cb));
2490 cb.rcv_msg = tls_queue;
2491 cb.parse_msg = tls_read_size;
2493 strp_init(&sw_ctx_rx->strp, sk, &cb);
2499 crypto_free_aead(*aead);
2502 kfree(cctx->rec_seq);
2503 cctx->rec_seq = NULL;
2509 kfree(ctx->priv_ctx_tx);
2510 ctx->priv_ctx_tx = NULL;
2512 kfree(ctx->priv_ctx_rx);
2513 ctx->priv_ctx_rx = NULL;