// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		/* Coalesce into the previous element when possible. */
		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

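/* Illustrative sketch (not part of this file): a sendmsg-style caller such
 * as the tcp_bpf path typically grows the msg first and then copies payload
 * into it, roughly:
 *
 *	err = sk_msg_alloc(sk, msg, msg->sg.size + copy, msg->sg.end - 1);
 *	if (err)
 *		return err;
 *	err = sk_msg_memcopy_from_iter(sk, &m->msg_iter, msg, copy);
 *
 * where 'm' is the struct msghdr being sent. elem_first_coalesce marks the
 * first element the caller is willing to coalesce new bytes into.
 */
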
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

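/* Note: sk_msg_clone() shares the source pages rather than copying payload
 * (sk_msg_page_add() takes an extra page reference) and charges the cloned
 * bytes to 'sk', so the clone must still go through the sk_msg_free*() paths.
 */
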
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of the last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

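/* Worked example: a msg with two elements of 6 and 4 bytes (size 10) trimmed
 * to len 4 frees the 4-byte tail element entirely and then shortens the
 * 6-byte element to 4 bytes, uncharging the remaining 2 bytes from the sock.
 */
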
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in that case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert the iov_iter updates; the msg will need to use 'trim' later
	 * if it also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

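/* Note: on failure the iov_iter is reverted, but any sg elements already
 * added stay in place; the caller is expected to clean them up, typically
 * by trimming the msg back to its original size with sk_msg_trim().
 */
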
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			msg->sg.copybreak = 0;
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

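/* Note: copying resumes at msg->sg.curr/msg->sg.copybreak, so repeated calls
 * continue where the previous copy stopped instead of overwriting data that
 * was already copied into the ring.
 */
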
static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0))
		return num_sge;

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition seeing it already set
	 * correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This transitions ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data is released
	 * from consume_skb() in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r()
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock_locked(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}

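/* With ingress == true the skb data is queued onto our own socket's msg
 * list; with ingress == false it is transmitted via skb_send_sock_locked(),
 * which expects the caller to hold the socket lock, so sk_psock_backlog()
 * below runs this under lock_sock().
 */
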
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	/* Lock sock to avoid losing sk_socket during loop. */
	lock_sock(psock->sk);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = tcp_skb_bpf_ingress(skb);
		do {
			ret = -EIO;
			if (likely(psock->sk->sk_socket))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	release_sock(psock->sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	INIT_LIST_HEAD(&psock->ingress_msg);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

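/* Lifecycle note: the psock is created with a single reference; users take
 * and drop references with sk_psock_get()/sk_psock_put(), and the final put
 * ends up in sk_psock_drop(), which defers the actual teardown through
 * call_rcu() and the sk_psock_destroy_deferred() work item below.
 */
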
struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
	__skb_queue_purge(&psock->ingress_skb);
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

	/* No sk_callback_lock since already detached. */

	/* Parser has been stopped */
	if (psock->progs.skb_parser)
		strp_done(&psock->parser.strp);

	cancel_work_sync(&psock->work);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

static void sk_psock_destroy(struct rcu_head *rcu)
{
	struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

	INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
	schedule_work(&psock->gc);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.skb_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

	call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

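/* The mapped verdicts are __SK_PASS (deliver on this socket), __SK_REDIRECT
 * (send to psock->sk_redir, which now holds a socket reference) and
 * __SK_DROP; callers such as the tcp_bpf sendmsg path act on this value.
 */
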
static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
			    struct sk_buff *skb)
{
	bpf_compute_data_end_sk_skb(skb);
	return bpf_prog_run_pin_on_cpu(prog, skb);
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
	struct sk_psock_parser *parser;

	parser = container_of(strp, struct sk_psock_parser, strp);
	return container_of(parser, struct sk_psock, parser);
}

static void sk_psock_skb_redirect(struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = tcp_skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		kfree_skb(skb);
		return;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
	    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		kfree_skb(skb);
		return;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock->sk, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static void sk_psock_verdict_apply(struct sk_psock *psock,
				   struct sk_buff *skb, int verdict)
{
	struct tcp_skb_cb *tcp;
	struct sock *sk_other;
	int err = -EIO;

	switch (verdict) {
	case __SK_PASS:
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		tcp = TCP_SKB_CB(skb);
		tcp->bpf.flags |= BPF_F_INGRESS;

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it is not empty we have to
		 * queue work so we don't get out-of-order data. Any
		 * errors from sk_psock_skb_ingress_self() are handled
		 * by retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			skb_queue_tail(&psock->ingress_skb, skb);
			schedule_work(&psock->work);
		}
		break;
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_DROP:
	default:
out_free:
		kfree_skb(skb);
	}
}

static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = sk_psock_from_strp(strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = sk_psock_bpf_run(psock, prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->parser.saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->parser.strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = orig_len;

	/* clone here so sk_eat_skb() in tcp_read_sock() does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();

	if (write_space)
		write_space(sk);
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	psock->parser.enabled = false;
	return strp_init(&psock->parser.strp, sk, &cb);
}

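/* Two receive modes are supported: the strparser path set up here and armed
 * by sk_psock_start_strp(), which runs a parser program before the verdict
 * program, and the lighter verdict-only path armed by
 * sk_psock_start_verdict(), which runs the verdict program straight from
 * ->read_sock() via sk_psock_verdict_data_ready().
 */
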
void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	strp_stop(&parser->strp);
	parser->enabled = false;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	parser->enabled = false;
}