GNU Linux-libre 5.4.207-gnu1
net/core/skmsg.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

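/* The sk_msg scatterlist is used as a ring, so sg.end may wrap around below
 * sg.start.  This helper returns true when elem_first_coalesce falls inside
 * the occupied part of the ring, i.e. when the most recently added element
 * may still be grown in place instead of consuming a new slot.
 */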
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
        if (msg->sg.end > msg->sg.start &&
            elem_first_coalesce < msg->sg.end)
                return true;

        if (msg->sg.end < msg->sg.start &&
            (elem_first_coalesce > msg->sg.start ||
             elem_first_coalesce < msg->sg.end))
                return true;

        return false;
}

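/* Grow @msg to @len total bytes using the socket's page_frag allocator,
 * charging the new bytes to @sk.  A new fragment is coalesced into the
 * previous element when it is contiguous with it and
 * sk_msg_try_coalesce_ok() allows it.  On -ENOMEM the message is trimmed
 * back to its original size; -ENOSPC means the sg ring is full.
 *
 * A rough caller sketch (an assumption for illustration, not code from this
 * file; see e.g. tcp_bpf_sendmsg() for the real pattern):
 *
 *	err = sk_msg_alloc(sk, msg, msg->sg.size + copy, msg->sg.end - 1);
 *	if (!err)
 *		err = sk_msg_memcopy_from_iter(sk, from, msg, copy);
 */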
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce)
{
        struct page_frag *pfrag = sk_page_frag(sk);
        u32 osize = msg->sg.size;
        int ret = 0;

        len -= msg->sg.size;
        while (len > 0) {
                struct scatterlist *sge;
                u32 orig_offset;
                int use, i;

                if (!sk_page_frag_refill(sk, pfrag)) {
                        ret = -ENOMEM;
                        goto msg_trim;
                }

                orig_offset = pfrag->offset;
                use = min_t(int, len, pfrag->size - orig_offset);
                if (!sk_wmem_schedule(sk, use)) {
                        ret = -ENOMEM;
                        goto msg_trim;
                }

                i = msg->sg.end;
                sk_msg_iter_var_prev(i);
                sge = &msg->sg.data[i];

                if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
                    sg_page(sge) == pfrag->page &&
                    sge->offset + sge->length == orig_offset) {
                        sge->length += use;
                } else {
                        if (sk_msg_full(msg)) {
                                ret = -ENOSPC;
                                break;
                        }

                        sge = &msg->sg.data[msg->sg.end];
                        sg_unmark_end(sge);
                        sg_set_page(sge, pfrag->page, use, orig_offset);
                        get_page(pfrag->page);
                        sk_msg_iter_next(msg, end);
                }

                sk_mem_charge(sk, use);
                msg->sg.size += use;
                pfrag->offset += use;
                len -= use;
        }

        return ret;

msg_trim:
        sk_msg_trim(sk, msg, osize);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

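/* Copy @len bytes starting at offset @off of @src into @dst without copying
 * data: the underlying pages are shared, either by extending the last @dst
 * element when the regions are contiguous or by adding a new element (which
 * takes a page reference via sk_msg_page_add()).  The bytes are charged to
 * @sk.  Returns -ENOSPC if @src runs out of data or @dst runs out of slots.
 */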
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
                 u32 off, u32 len)
{
        int i = src->sg.start;
        struct scatterlist *sge = sk_msg_elem(src, i);
        struct scatterlist *sgd = NULL;
        u32 sge_len, sge_off;

        while (off) {
                if (sge->length > off)
                        break;
                off -= sge->length;
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && off)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        while (len) {
                sge_len = sge->length - off;
                if (sge_len > len)
                        sge_len = len;

                if (dst->sg.end)
                        sgd = sk_msg_elem(dst, dst->sg.end - 1);

                if (sgd &&
                    (sg_page(sge) == sg_page(sgd)) &&
                    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
                        sgd->length += sge_len;
                        dst->sg.size += sge_len;
                } else if (!sk_msg_full(dst)) {
                        sge_off = sge->offset + off;
                        sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
                } else {
                        return -ENOSPC;
                }

                off = 0;
                len -= sge_len;
                sk_mem_charge(sk, sge_len);
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && len)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

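/* Give back @bytes of socket memory accounting and consume the same amount
 * from the front of @msg, zeroing fully consumed elements and advancing
 * sg.start.
 */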
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (bytes < sge->length) {
                        sge->length -= bytes;
                        sge->offset += bytes;
                        sk_mem_uncharge(sk, bytes);
                        break;
                }

                sk_mem_uncharge(sk, sge->length);
                bytes -= sge->length;
                sge->length = 0;
                sge->offset = 0;
                sk_msg_iter_var_next(i);
        } while (bytes && i != msg->sg.end);
        msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

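/* As above, but only the memory accounting is released; the elements of
 * @msg themselves are left untouched.
 */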
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = &msg->sg.data[i];
                int uncharge = (bytes < sge->length) ? bytes : sge->length;

                sk_mem_uncharge(sk, uncharge);
                bytes -= uncharge;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
                            bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        u32 len = sge->length;

        /* When the skb owns the memory we free it from the consume_skb() path. */
        if (!msg->skb) {
                if (charge)
                        sk_mem_uncharge(sk, len);
                put_page(sg_page(sge));
        }
        memset(sge, 0, sizeof(*sge));
        return len;
}

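/* Free all remaining elements of @msg starting at @i, optionally uncharging
 * the socket, drop the backing skb if there is one and reinitialize @msg.
 * Returns the number of bytes freed.
 */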
static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
                         bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        int freed = 0;

        while (msg->sg.size) {
                msg->sg.size -= sge->length;
                freed += sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, msg->sg.size);
                sge = sk_msg_elem(msg, i);
        }
        consume_skb(msg->skb);
        sk_msg_init(msg);
        return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes, bool charge)
{
        struct scatterlist *sge;
        u32 i = msg->sg.start;

        while (bytes) {
                sge = sk_msg_elem(msg, i);
                if (!sge->length)
                        break;
                if (bytes < sge->length) {
                        if (charge)
                                sk_mem_uncharge(sk, bytes);
                        sge->length -= bytes;
                        sge->offset += bytes;
                        msg->sg.size -= bytes;
                        break;
                }

                msg->sg.size -= sge->length;
                bytes -= sge->length;
                sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, bytes);
        }
        msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, false);
}

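/* Shrink @msg to @len total bytes: whole trailing elements are freed and the
 * last remaining element is truncated, with sg.curr/copybreak pulled back if
 * they now point past the new end.
 */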
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
        int trim = msg->sg.size - len;
        u32 i = msg->sg.end;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        sk_msg_iter_var_prev(i);
        msg->sg.size = len;
        while (msg->sg.data[i].length &&
               trim >= msg->sg.data[i].length) {
                trim -= msg->sg.data[i].length;
                sk_msg_free_elem(sk, msg, i, true);
                sk_msg_iter_var_prev(i);
                if (!trim)
                        goto out;
        }

        msg->sg.data[i].length -= trim;
        sk_mem_uncharge(sk, trim);
        /* Adjust copybreak if it falls into the trimmed part of last buf */
        if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
                msg->sg.copybreak = msg->sg.data[i].length;
out:
        sk_msg_iter_var_next(i);
        msg->sg.end = i;

        /* If we trim data that covers a full sg elem before the curr pointer,
         * update copybreak and curr so that any future copy operations
         * start at the new copy location.
         * However, trimmed data that has not yet been used in a copy op
         * does not require an update.
         */
        if (!msg->sg.size) {
                msg->sg.curr = msg->sg.start;
                msg->sg.copybreak = 0;
        } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
                   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
                sk_msg_iter_var_prev(i);
                msg->sg.curr = i;
                msg->sg.copybreak = msg->sg.data[i].length;
        }
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

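/* Pin up to @bytes of user memory from @from directly into @msg without
 * copying the data, charging each fragment to @sk.  On failure the iterator
 * is reverted; trimming @msg back is left to the caller.
 */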
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes)
{
        int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
        const int to_max_pages = MAX_MSG_FRAGS;
        struct page *pages[MAX_MSG_FRAGS];
        ssize_t orig, copied, use, offset;

        orig = msg->sg.size;
        while (bytes > 0) {
                i = 0;
                maxpages = to_max_pages - num_elems;
                if (maxpages == 0) {
                        ret = -EFAULT;
                        goto out;
                }

                copied = iov_iter_get_pages(from, pages, bytes, maxpages,
                                            &offset);
                if (copied <= 0) {
                        ret = -EFAULT;
                        goto out;
                }

                iov_iter_advance(from, copied);
                bytes -= copied;
                msg->sg.size += copied;

                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);
                        sg_set_page(&msg->sg.data[msg->sg.end],
                                    pages[i], use, offset);
                        sg_unmark_end(&msg->sg.data[msg->sg.end]);
                        sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;
                        sk_msg_iter_next(msg, end);
                        num_elems++;
                        i++;
                }
                /* When zerocopy is mixed with sk_msg_*copy* operations we
                 * may have a copybreak set; in this case clear it and prefer
                 * the zerocopy remainder when possible.
                 */
                msg->sg.copybreak = 0;
                msg->sg.curr = msg->sg.end;
        }
out:
        /* Revert the iov_iter updates; if msg also needs to be cleared, the
         * caller must 'trim' it later.
         */
        if (ret)
                iov_iter_revert(from, msg->sg.size - orig);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

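/* Copy @bytes from @from into memory previously reserved in @msg (see
 * sk_msg_alloc()), starting at sg.curr/copybreak.  Returns a negative errno
 * on failure: -EFAULT on a short copy, -ENOSPC if @msg runs out of room.
 */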
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             struct sk_msg *msg, u32 bytes)
{
        int ret = -ENOSPC, i = msg->sg.curr;
        struct scatterlist *sge;
        u32 copy, buf_size;
        void *to;

        do {
                sge = sk_msg_elem(msg, i);
                /* This is possible if a trim operation shrunk the buffer */
                if (msg->sg.copybreak >= sge->length) {
                        msg->sg.copybreak = 0;
                        sk_msg_iter_var_next(i);
                        if (i == msg->sg.end)
                                break;
                        sge = sk_msg_elem(msg, i);
                }

                buf_size = sge->length - msg->sg.copybreak;
                copy = (buf_size > bytes) ? bytes : buf_size;
                to = sg_virt(sge) + msg->sg.copybreak;
                msg->sg.copybreak += copy;
                if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
                        ret = copy_from_iter_nocache(to, copy, from);
                else
                        ret = copy_from_iter(to, copy, from);
                if (ret != copy) {
                        ret = -EFAULT;
                        goto out;
                }
                bytes -= copy;
                if (!bytes)
                        break;
                msg->sg.copybreak = 0;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
out:
        msg->sg.curr = i;
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

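/* Allocate an sk_msg for data that is being redirected to @sk's ingress
 * queue, but only when the receive buffer and memory accounting still have
 * room for @skb.
 */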
static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
                                                  struct sk_buff *skb)
{
        struct sk_msg *msg;

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                return NULL;

        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return NULL;

        msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
        if (unlikely(!msg))
                return NULL;

        sk_msg_init(msg);
        return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
                                        struct sk_psock *psock,
                                        struct sock *sk,
                                        struct sk_msg *msg)
{
        int num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
        int copied;

        if (unlikely(num_sge < 0)) {
                kfree(msg);
                return num_sge;
        }

        copied = skb->len;
        msg->sg.start = 0;
        msg->sg.size = copied;
        msg->sg.end = num_sge;
        msg->skb = skb;

        sk_psock_queue_msg(psock, msg);
        sk_psock_data_ready(sk, psock);
        return copied;
}

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
        struct sock *sk = psock->sk;
        struct sk_msg *msg;

        msg = sk_psock_create_ingress_msg(sk, skb);
        if (!msg)
                return -EAGAIN;

        /* This will transition ownership of the data from the socket where
         * the BPF program was run initiating the redirect to the socket
         * we will eventually receive this data on. The data will be released
         * via consume_skb() in __tcp_bpf_recvmsg() after it has been copied
         * into user buffers.
         */
        skb_set_owner_r(skb, sk);
        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
        struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
        struct sock *sk = psock->sk;

        if (unlikely(!msg))
                return -EAGAIN;
        sk_msg_init(msg);
        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
                               u32 off, u32 len, bool ingress)
{
        if (ingress)
                return sk_psock_skb_ingress(psock, skb);
        else
                return skb_send_sock_locked(psock->sk, skb, off, len);
}

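/* Work callback draining psock->ingress_skb.  Each skb is either turned into
 * ingress messages for the local socket or sent to the redirect target.  If
 * a send returns -EAGAIN, the skb and the current offset/length are stashed
 * in work_state and the transfer is resumed on the next run.
 */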
static void sk_psock_backlog(struct work_struct *work)
{
        struct sk_psock *psock = container_of(work, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb;
        bool ingress;
        u32 len, off;
        int ret;

        /* Lock sock to avoid losing sk_socket during loop. */
        lock_sock(psock->sk);
        if (state->skb) {
                skb = state->skb;
                len = state->len;
                off = state->off;
                state->skb = NULL;
                goto start;
        }

        while ((skb = skb_dequeue(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
start:
                ingress = tcp_skb_bpf_ingress(skb);
                do {
                        ret = -EIO;
                        if (likely(psock->sk->sk_socket))
                                ret = sk_psock_handle_skb(psock, skb, off,
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        state->skb = skb;
                                        state->len = len;
                                        state->off = off;
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
                                kfree_skb(skb);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);

                if (!ingress)
                        kfree_skb(skb);
        }
end:
        release_sock(psock->sk);
}

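/* Allocate a psock for @sk, start it in the TX-enabled state, publish it via
 * sk_user_data under RCU and take a reference on the socket.
 */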
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
        struct sk_psock *psock = kzalloc_node(sizeof(*psock),
                                              GFP_ATOMIC | __GFP_NOWARN,
                                              node);
        if (!psock)
                return NULL;

        psock->sk = sk;
        psock->eval = __SK_NONE;

        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);

        INIT_WORK(&psock->work, sk_psock_backlog);
        INIT_LIST_HEAD(&psock->ingress_msg);
        skb_queue_head_init(&psock->ingress_skb);

        sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
        refcount_set(&psock->refcnt, 1);

        rcu_assign_sk_user_data(sk, psock);
        sock_hold(sk);

        return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
        struct sk_psock_link *link;

        spin_lock_bh(&psock->link_lock);
        link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
                                        list);
        if (link)
                list_del(&link->list);
        spin_unlock_bh(&psock->link_lock);
        return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
        struct sk_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
                list_del(&msg->list);
                sk_msg_free(psock->sk, msg);
                kfree(msg);
        }
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
        __skb_queue_purge(&psock->ingress_skb);
        __sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
        struct sk_psock_link *link, *tmp;

        list_for_each_entry_safe(link, tmp, &psock->link, list) {
                list_del(&link->list);
                sk_psock_free_link(link);
        }
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
        struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

        /* No sk_callback_lock since already detached. */

        /* Parser has been stopped */
        if (psock->progs.skb_parser)
                strp_done(&psock->parser.strp);

        cancel_work_sync(&psock->work);

        psock_progs_drop(&psock->progs);

        sk_psock_link_destroy(psock);
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);

        if (psock->sk_redir)
                sock_put(psock->sk_redir);
        sock_put(psock->sk);
        kfree(psock);
}

void sk_psock_destroy(struct rcu_head *rcu)
{
        struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

        INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
        schedule_work(&psock->gc);
}
EXPORT_SYMBOL_GPL(sk_psock_destroy);

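/* Detach the psock from @sk: release any corked or queued ingress data,
 * restore the original protocol callbacks and clear sk_user_data.  The psock
 * itself is freed from an RCU callback via sk_psock_destroy().
 */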
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);

        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
        if (psock->progs.skb_parser)
                sk_psock_stop_strp(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

        call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
        switch (verdict) {
        case SK_PASS:
                return redir ? __SK_REDIRECT : __SK_PASS;
        case SK_DROP:
        default:
                break;
        }

        return __SK_DROP;
}

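/* Run the msg_parser BPF program on @msg and map its return code to an
 * internal verdict.  For __SK_REDIRECT the target socket chosen by the
 * program is stored in psock->sk_redir with a reference held; a missing
 * target downgrades the verdict to __SK_DROP.
 */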
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg)
{
        struct bpf_prog *prog;
        int ret;

        preempt_disable();
        rcu_read_lock();
        prog = READ_ONCE(psock->progs.msg_parser);
        if (unlikely(!prog)) {
                ret = __SK_PASS;
                goto out;
        }

        sk_msg_compute_data_pointers(msg);
        msg->sk = sk;
        ret = BPF_PROG_RUN(prog, msg);
        ret = sk_psock_map_verd(ret, msg->sk_redir);
        psock->apply_bytes = msg->apply_bytes;
        if (ret == __SK_REDIRECT) {
                if (psock->sk_redir)
                        sock_put(psock->sk_redir);
                psock->sk_redir = msg->sk_redir;
                if (!psock->sk_redir) {
                        ret = __SK_DROP;
                        goto out;
                }
                sock_hold(psock->sk_redir);
        }
out:
        rcu_read_unlock();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
                            struct sk_buff *skb)
{
        int ret;

        skb->sk = psock->sk;
        bpf_compute_data_end_sk_skb(skb);
        preempt_disable();
        ret = BPF_PROG_RUN(prog, skb);
        preempt_enable();
        /* strparser clones the skb before handing it to an upper layer,
         * meaning skb_orphan has been called. We NULL sk on the way out
         * to ensure we don't trigger a BUG_ON() in skb/sk operations
         * later and because we are not charging the memory of this skb
         * to any socket yet.
         */
        skb->sk = NULL;
        return ret;
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
        struct sk_psock_parser *parser;

        parser = container_of(strp, struct sk_psock_parser, strp);
        return container_of(parser, struct sk_psock, parser);
}

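/* Hand @skb to the socket selected by the BPF program.  The skb is queued on
 * the target psock's ingress_skb list and its backlog work is scheduled; it
 * is dropped if the target has gone away, is dead, has TX disabled or has no
 * buffer space left (writeable for egress, rcvbuf for ingress).
 */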
static void sk_psock_skb_redirect(struct sk_buff *skb)
{
        struct sk_psock *psock_other;
        struct sock *sk_other;
        bool ingress;

        sk_other = tcp_skb_bpf_redirect_fetch(skb);
        if (unlikely(!sk_other)) {
                kfree_skb(skb);
                return;
        }
        psock_other = sk_psock(sk_other);
        if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
            !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
                kfree_skb(skb);
                return;
        }

        ingress = tcp_skb_bpf_ingress(skb);
        if ((!ingress && sock_writeable(sk_other)) ||
            (ingress &&
             atomic_read(&sk_other->sk_rmem_alloc) <=
             sk_other->sk_rcvbuf)) {
                if (!ingress)
                        skb_set_owner_w(skb, sk_other);
                skb_queue_tail(&psock_other->ingress_skb, skb);
                schedule_work(&psock_other->work);
        } else {
                kfree_skb(skb);
        }
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
{
        switch (verdict) {
        case __SK_REDIRECT:
                sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
        default:
                break;
        }
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog;
        int ret = __SK_PASS;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
        sk_psock_tls_verdict_apply(skb, ret);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

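/* Apply the verdict from the skb_verdict program: __SK_PASS queues the data
 * on the local socket's ingress path (directly when ingress_skb is empty,
 * otherwise via the backlog work to preserve ordering), __SK_REDIRECT hands
 * the skb to another socket and anything else frees it.
 */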
static void sk_psock_verdict_apply(struct sk_psock *psock,
                                   struct sk_buff *skb, int verdict)
{
        struct tcp_skb_cb *tcp;
        struct sock *sk_other;
        int err = -EIO;

        switch (verdict) {
        case __SK_PASS:
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
                    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                        goto out_free;
                }

                tcp = TCP_SKB_CB(skb);
                tcp->bpf.flags |= BPF_F_INGRESS;

                /* If the queue is empty then we can submit directly
                 * into the msg queue. If it's not empty we have to
                 * queue work, otherwise we may get OOO data.
                 * Any errors from sk_psock_skb_ingress_self() are
                 * handled by retrying later from the workqueue.
                 */
                if (skb_queue_empty(&psock->ingress_skb)) {
                        err = sk_psock_skb_ingress_self(psock, skb);
                }
                if (err < 0) {
                        skb_queue_tail(&psock->ingress_skb, skb);
                        schedule_work(&psock->work);
                }
                break;
        case __SK_REDIRECT:
                sk_psock_skb_redirect(skb);
                break;
        case __SK_DROP:
                /* fall-through */
        default:
out_free:
                kfree_skb(skb);
        }
}

static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        struct sock *sk;

        rcu_read_lock();
        sk = strp->sk;
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                kfree_skb(skb);
                goto out;
        }
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb_orphan(skb);
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
        return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock = sk_psock_from_strp(strp);
        struct bpf_prog *prog;
        int ret = skb->len;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_parser);
        if (likely(prog))
                ret = sk_psock_bpf_run(psock, prog, skb);
        rcu_read_unlock();
        return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (tls_sw_has_ctx_rx(sk)) {
                        psock->parser.saved_data_ready(sk);
                } else {
                        write_lock_bh(&sk->sk_callback_lock);
                        strp_data_ready(&psock->parser.strp);
                        write_unlock_bh(&sk->sk_callback_lock);
                }
        }
        rcu_read_unlock();
}

static void sk_psock_write_space(struct sock *sk)
{
        struct sk_psock *psock;
        void (*write_space)(struct sock *sk) = NULL;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        schedule_work(&psock->work);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
        if (write_space)
                write_space(sk);
}

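/* strparser glue: sk_psock_init_strp() registers the parser callbacks above,
 * sk_psock_start_strp() saves and replaces the socket's data_ready callback
 * (and points write_space at the backlog kick), and sk_psock_stop_strp()
 * restores data_ready and stops the strparser.
 */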
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        static const struct strp_callbacks cb = {
                .rcv_msg        = sk_psock_strp_read,
                .read_sock_done = sk_psock_strp_read_done,
                .parse_msg      = sk_psock_strp_parse,
        };

        psock->parser.enabled = false;
        return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_parser *parser = &psock->parser;

        if (parser->enabled)
                return;

        parser->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_strp_data_ready;
        sk->sk_write_space = sk_psock_write_space;
        parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_parser *parser = &psock->parser;

        if (!parser->enabled)
                return;

        sk->sk_data_ready = parser->saved_data_ready;
        parser->saved_data_ready = NULL;
        strp_stop(&parser->strp);
        parser->enabled = false;
}