// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

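/* Allocate up to @len bytes of scatterlist space for @msg from @sk's
 * page_frag allocator, coalescing with the last element when permitted by
 * @elem_first_coalesce, and charge the new memory to the socket.
 */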
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

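/* Clone @len bytes starting at offset @off from @src into @dst, sharing the
 * underlying pages and charging the cloned bytes to @sk.
 */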
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

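/* Uncharge @bytes from the front of @msg without freeing the underlying
 * pages, zeroing out fully consumed elements and advancing msg->sg.start.
 */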
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

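/* Free the first @bytes of @msg, releasing whole elements where possible and
 * shrinking the element the boundary lands in; @charge selects whether the
 * freed memory is also uncharged from @sk.
 */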
static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

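/* Trim @msg down to @len bytes from the tail, freeing any elements that fall
 * entirely inside the trimmed region and adjusting curr/copybreak as needed.
 */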
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of the last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

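/* Pin user pages from @from and map them directly into @msg's scatterlist
 * (zerocopy). On failure the iterator is reverted; the caller may still need
 * to trim @msg.
 */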
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we may
		 * have a copybreak set; in that case clear it and prefer the
		 * zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

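/* Copy @bytes from @from into the buffers already allocated in @msg, starting
 * at the current copybreak position within the current element.
 */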
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
		     long timeo, int *err)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_wait_data);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

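/* Allocate an sk_msg for an skb being queued to @sk's ingress path, enforcing
 * the socket's receive buffer and memory accounting limits.
 */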
static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb linearize may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0))
		return num_sge;

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition seeing it already set
	 * correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from skb_consume found in __tcp_bpf_recvmsg() after it's been copied
	 * into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}

static void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       struct sk_buff *skb,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->skb = skb;
		state->len = len;
		state->off = off;
	} else {
		sock_drop(psock->sk, skb);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

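/* Workqueue handler that drains psock->ingress_skb, either transmitting each
 * skb or feeding it to the local ingress path. An skb that fails with
 * -EAGAIN is stashed in work_state and retried on the next run.
 */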
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->skb)) {
		spin_lock_bh(&psock->ingress_lock);
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		spin_unlock_bh(&psock->ingress_lock);
	}
	if (skb)
		goto start;

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, skb,
							   len, off);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

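/* Allocate a psock for @sk and attach it via sk_user_data, saving the
 * socket's original protocol callbacks so they can be restored when the
 * psock is dropped.
 */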
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
	/* We null the skb here to ensure that calls to sk_psock_backlog
	 * do not pick up the freed skb.
	 */
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock, false);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

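/* Map a BPF program's SK_PASS/SK_DROP return value, together with whether a
 * redirect target was set, onto the internal __SK_* action codes.
 */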
static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

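/* Queue @skb onto the ingress backlog of the socket chosen by the BPF
 * redirect, dropping it if the target psock is gone or shutting down.
 */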
static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error is a buggy BPF program: it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Errors
		 * from sk_psock_skb_ingress will be handled by
		 * retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

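/* Replacement sk_write_space callback: kick the backlog workqueue when
 * transmit is enabled, then invoke the socket's original callback.
 */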
static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

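/* read_sock() callback used when only a verdict program is attached (no
 * stream parser): clone each skb, run the verdict program, and apply the
 * result.
 */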
static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
		len = 0;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}