// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

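/* msg->sg.data is used as a ring: end > start means the used region is
 * contiguous, end < start means it has wrapped around. Coalescing into
 * the tail element is only allowed while the caller's first coalescible
 * element, elem_first_coalesce, still lies inside the used region.
 */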
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
        if (msg->sg.end > msg->sg.start &&
            elem_first_coalesce < msg->sg.end)
                return true;

        if (msg->sg.end < msg->sg.start &&
            (elem_first_coalesce > msg->sg.start ||
             elem_first_coalesce < msg->sg.end))
                return true;

        return false;
}

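/* Grow @msg with page-frag memory until it holds @len bytes in total,
 * coalescing with the tail element when sk_msg_try_coalesce_ok() allows
 * it. On allocation or charge failure the msg is trimmed back to its
 * original size and -ENOMEM is returned; a full sg ring gives -ENOSPC.
 *
 * Illustrative call from a hypothetical sendmsg path (not a caller in
 * this file; 'copy' is the number of bytes being appended):
 *
 *	ret = sk_msg_alloc(sk, msg, msg->sg.size + copy, msg->sg.end - 1);
 *	if (ret)
 *		goto wait_for_memory;
 */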
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce)
{
        struct page_frag *pfrag = sk_page_frag(sk);
        u32 osize = msg->sg.size;
        int ret = 0;

        len -= msg->sg.size;
        while (len > 0) {
                struct scatterlist *sge;
                u32 orig_offset;
                int use, i;

                if (!sk_page_frag_refill(sk, pfrag)) {
                        ret = -ENOMEM;
                        goto msg_trim;
                }

                orig_offset = pfrag->offset;
                use = min_t(int, len, pfrag->size - orig_offset);
                if (!sk_wmem_schedule(sk, use)) {
                        ret = -ENOMEM;
                        goto msg_trim;
                }

                i = msg->sg.end;
                sk_msg_iter_var_prev(i);
                sge = &msg->sg.data[i];

                if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
                    sg_page(sge) == pfrag->page &&
                    sge->offset + sge->length == orig_offset) {
                        sge->length += use;
                } else {
                        if (sk_msg_full(msg)) {
                                ret = -ENOSPC;
                                break;
                        }

                        sge = &msg->sg.data[msg->sg.end];
                        sg_unmark_end(sge);
                        sg_set_page(sge, pfrag->page, use, orig_offset);
                        get_page(pfrag->page);
                        sk_msg_iter_next(msg, end);
                }

                sk_mem_charge(sk, use);
                msg->sg.size += use;
                pfrag->offset += use;
                len -= use;
        }

        return ret;

msg_trim:
        sk_msg_trim(sk, msg, osize);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

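/* Clone @len bytes starting at offset @off of @src into @dst by
 * reference: no data is copied, @dst ends up pointing at the same pages
 * as @src, and the cloned bytes are charged to @sk. Returns -ENOSPC if
 * @src runs out of data or @dst runs out of sg slots first.
 */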
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
                 u32 off, u32 len)
{
        int i = src->sg.start;
        struct scatterlist *sge = sk_msg_elem(src, i);
        struct scatterlist *sgd = NULL;
        u32 sge_len, sge_off;

        while (off) {
                if (sge->length > off)
                        break;
                off -= sge->length;
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && off)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        while (len) {
                sge_len = sge->length - off;
                if (sge_len > len)
                        sge_len = len;

                if (dst->sg.end)
                        sgd = sk_msg_elem(dst, dst->sg.end - 1);

                if (sgd &&
                    (sg_page(sge) == sg_page(sgd)) &&
                    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
                        sgd->length += sge_len;
                        dst->sg.size += sge_len;
                } else if (!sk_msg_full(dst)) {
                        sge_off = sge->offset + off;
                        sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
                } else {
                        return -ENOSPC;
                }

                off = 0;
                len -= sge_len;
                sk_mem_charge(sk, sge_len);
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && len)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

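/* Uncharge @bytes of @msg from @sk's memory accounting.
 * sk_msg_return_zero() also zeroes the consumed head elements and
 * advances msg->sg.start, while sk_msg_return() below walks the whole
 * ring and only adjusts the accounting.
 */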
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (bytes < sge->length) {
                        sge->length -= bytes;
                        sge->offset += bytes;
                        sk_mem_uncharge(sk, bytes);
                        break;
                }

                sk_mem_uncharge(sk, sge->length);
                bytes -= sge->length;
                sge->length = 0;
                sge->offset = 0;
                sk_msg_iter_var_next(i);
        } while (bytes && i != msg->sg.end);
        msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = &msg->sg.data[i];
                int uncharge = (bytes < sge->length) ? bytes : sge->length;

                sk_mem_uncharge(sk, uncharge);
                bytes -= uncharge;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

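/* Free the sg element at index @i: unless the backing skb owns the
 * memory (then it is freed from the consume_skb() path), drop the page
 * reference and, if @charge is set, uncharge the bytes from @sk.
 * __sk_msg_free() below applies this to every used element starting at
 * @i, releases msg->skb, reinitializes @msg and returns the number of
 * bytes freed.
 */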
static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
                            bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        u32 len = sge->length;

        /* When the skb owns the memory we free it from consume_skb path. */
        if (!msg->skb) {
                if (charge)
                        sk_mem_uncharge(sk, len);
                put_page(sg_page(sge));
        }
        memset(sge, 0, sizeof(*sge));
        return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
                         bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        int freed = 0;

        while (msg->sg.size) {
                msg->sg.size -= sge->length;
                freed += sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, msg->sg.size);
                sge = sk_msg_elem(msg, i);
        }
        consume_skb(msg->skb);
        sk_msg_init(msg);
        return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes, bool charge)
{
        struct scatterlist *sge;
        u32 i = msg->sg.start;

        while (bytes) {
                sge = sk_msg_elem(msg, i);
                if (!sge->length)
                        break;
                if (bytes < sge->length) {
                        if (charge)
                                sk_mem_uncharge(sk, bytes);
                        sge->length -= bytes;
                        sge->offset += bytes;
                        msg->sg.size -= bytes;
                        break;
                }

                msg->sg.size -= sge->length;
                bytes -= sge->length;
                sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, bytes);
        }
        msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, false);
}

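/* Shrink @msg to @len bytes, freeing whole trailing elements and
 * shortening the last partial one. curr and copybreak are fixed up so
 * that future copy operations start at a valid location.
 */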
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
        int trim = msg->sg.size - len;
        u32 i = msg->sg.end;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        sk_msg_iter_var_prev(i);
        msg->sg.size = len;
        while (msg->sg.data[i].length &&
               trim >= msg->sg.data[i].length) {
                trim -= msg->sg.data[i].length;
                sk_msg_free_elem(sk, msg, i, true);
                sk_msg_iter_var_prev(i);
                if (!trim)
                        goto out;
        }

        msg->sg.data[i].length -= trim;
        sk_mem_uncharge(sk, trim);
        /* Adjust copybreak if it falls into the trimmed part of last buf */
        if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
                msg->sg.copybreak = msg->sg.data[i].length;
out:
        sk_msg_iter_var_next(i);
        msg->sg.end = i;

        /* If we trim data a full sg elem before the curr pointer, update
         * copybreak and curr so that any future copy operations start at
         * the new copy location.
         * However, trimmed data that has not yet been used in a copy op
         * does not require an update.
         */
        if (!msg->sg.size) {
                msg->sg.curr = msg->sg.start;
                msg->sg.copybreak = 0;
        } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
                   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
                sk_msg_iter_var_prev(i);
                msg->sg.curr = i;
                msg->sg.copybreak = msg->sg.data[i].length;
        }
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

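/* Pin user pages from @from and link them into @msg's sg ring without
 * copying. On failure the iov_iter is reverted and the caller is
 * expected to trim @msg if it also needs to be unwound. Note that
 * running out of sg slots is reported as -EFAULT here, like a failed
 * page pin.
 */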
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes)
{
        int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
        const int to_max_pages = MAX_MSG_FRAGS;
        struct page *pages[MAX_MSG_FRAGS];
        ssize_t orig, copied, use, offset;

        orig = msg->sg.size;
        while (bytes > 0) {
                i = 0;
                maxpages = to_max_pages - num_elems;
                if (maxpages == 0) {
                        ret = -EFAULT;
                        goto out;
                }

                copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
                                            &offset);
                if (copied <= 0) {
                        ret = -EFAULT;
                        goto out;
                }

                bytes -= copied;
                msg->sg.size += copied;

                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);
                        sg_set_page(&msg->sg.data[msg->sg.end],
                                    pages[i], use, offset);
                        sg_unmark_end(&msg->sg.data[msg->sg.end]);
                        sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;
                        sk_msg_iter_next(msg, end);
                        num_elems++;
                        i++;
                }
                /* When zerocopy is mixed with sk_msg_*copy* operations we
                 * may have a copybreak set. In this case clear it and
                 * prefer the zerocopy remainder when possible.
                 */
                msg->sg.copybreak = 0;
                msg->sg.curr = msg->sg.end;
        }
out:
        /* Revert iov_iter updates, msg will need to use 'trim' later if it
         * also needs to be cleared.
         */
        if (ret)
                iov_iter_revert(from, msg->sg.size - orig);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

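/* Copy @bytes from @from into memory already allocated in @msg, filling
 * each element from its copybreak mark onwards. A non-cacheable copy is
 * used when the route supports NETIF_F_NOCACHE_COPY. Returns -ENOSPC if
 * @msg fills up before @bytes are copied and -EFAULT on a failed copy
 * from userspace.
 */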
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             struct sk_msg *msg, u32 bytes)
{
        int ret = -ENOSPC, i = msg->sg.curr;
        struct scatterlist *sge;
        u32 copy, buf_size;
        void *to;

        do {
                sge = sk_msg_elem(msg, i);
                /* This is possible if a trim operation shrunk the buffer */
                if (msg->sg.copybreak >= sge->length) {
                        msg->sg.copybreak = 0;
                        sk_msg_iter_var_next(i);
                        if (i == msg->sg.end)
                                break;
                        sge = sk_msg_elem(msg, i);
                }

                buf_size = sge->length - msg->sg.copybreak;
                copy = (buf_size > bytes) ? bytes : buf_size;
                to = sg_virt(sge) + msg->sg.copybreak;
                msg->sg.copybreak += copy;
                if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
                        ret = copy_from_iter_nocache(to, copy, from);
                else
                        ret = copy_from_iter(to, copy, from);
                if (ret != copy) {
                        ret = -EFAULT;
                        goto out;
                }
                bytes -= copy;
                if (!bytes)
                        break;
                msg->sg.copybreak = 0;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
out:
        msg->sg.curr = i;
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                   int len, int flags)
{
        struct iov_iter *iter = &msg->msg_iter;
        int peek = flags & MSG_PEEK;
        struct sk_msg *msg_rx;
        int i, copied = 0;

        msg_rx = sk_psock_peek_msg(psock);
        while (copied != len) {
                struct scatterlist *sge;

                if (unlikely(!msg_rx))
                        break;

                i = msg_rx->sg.start;
                do {
                        struct page *page;
                        int copy;

                        sge = sk_msg_elem(msg_rx, i);
                        copy = sge->length;
                        page = sg_page(sge);
                        if (copied + copy > len)
                                copy = len - copied;
                        copy = copy_page_to_iter(page, sge->offset, copy, iter);
                        if (!copy) {
                                copied = copied ? copied : -EFAULT;
                                goto out;
                        }

                        copied += copy;
                        if (likely(!peek)) {
                                sge->offset += copy;
                                sge->length -= copy;
                                if (!msg_rx->skb)
                                        sk_mem_uncharge(sk, copy);
                                msg_rx->sg.size -= copy;

                                if (!sge->length) {
                                        sk_msg_iter_var_next(i);
                                        if (!msg_rx->skb)
                                                put_page(page);
                                }
                        } else {
                                /* Let's not optimize the peek case: if
                                 * copy_page_to_iter didn't copy the entire
                                 * length, just break.
                                 */
                                if (copy != sge->length)
                                        goto out;
                                sk_msg_iter_var_next(i);
                        }

                        if (copied == len)
                                break;
                } while ((i != msg_rx->sg.end) && !sg_is_last(sge));

                if (unlikely(peek)) {
                        msg_rx = sk_psock_next_msg(psock, msg_rx);
                        if (!msg_rx)
                                break;
                        continue;
                }

                msg_rx->sg.start = i;
                if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
                        msg_rx = sk_psock_dequeue_msg(psock);
                        kfree_sk_msg(msg_rx);
                }
                msg_rx = sk_psock_peek_msg(psock);
        }
out:
        return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
        struct sk_psock *psock;
        bool empty = true;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock))
                empty = list_empty(&psock->ingress_msg);
        rcu_read_unlock();
        return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
        struct sk_msg *msg;

        msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
        if (unlikely(!msg))
                return NULL;
        sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
        return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
                                                  struct sk_buff *skb)
{
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                return NULL;

        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return NULL;

        return alloc_sk_msg(GFP_KERNEL);
}

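/* Map @skb into @msg's scatterlist (linearizing on demand so that
 * skb_to_sgvec() cannot fail a second time), queue @msg on the psock
 * ingress list and wake the receiver. Returns the number of bytes
 * enqueued or a negative error.
 */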
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
                                        u32 off, u32 len,
                                        struct sk_psock *psock,
                                        struct sock *sk,
                                        struct sk_msg *msg)
{
        int num_sge, copied;

        num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
        if (num_sge < 0) {
                /* skb linearize may fail with ENOMEM, but let's simply try again
                 * later if this happens. Under memory pressure we don't want to
                 * drop the skb. We need to linearize the skb so that the mapping
                 * in skb_to_sgvec cannot error.
                 */
                if (skb_linearize(skb))
                        return -EAGAIN;

                num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
                if (unlikely(num_sge < 0))
                        return num_sge;
        }

        copied = len;
        msg->sg.start = 0;
        msg->sg.size = copied;
        msg->sg.end = num_sge;
        msg->skb = skb;

        sk_psock_queue_msg(psock, msg);
        sk_psock_data_ready(sk, psock);
        return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
                                     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
                                u32 off, u32 len)
{
        struct sock *sk = psock->sk;
        struct sk_msg *msg;
        int err;

        /* If we are receiving on the same sock skb->sk is already assigned,
         * skip memory accounting and owner transition seeing it already set
         * correctly.
         */
        if (unlikely(skb->sk == sk))
                return sk_psock_skb_ingress_self(psock, skb, off, len);
        msg = sk_psock_create_ingress_msg(sk, skb);
        if (!msg)
                return -EAGAIN;

        /* This will transition ownership of the data from the socket where
         * the BPF program was run initiating the redirect to the socket
         * we will eventually receive this data on. The data will be released
         * from consume_skb() found in __tcp_bpf_recvmsg() after it's been
         * copied into user buffers.
         */
        skb_set_owner_r(skb, sk);
        err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
        if (err < 0)
                kfree(msg);
        return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
                                     u32 off, u32 len)
{
        struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
        struct sock *sk = psock->sk;
        int err;

        if (unlikely(!msg))
                return -EAGAIN;
        skb_set_owner_r(skb, sk);
        err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
        if (err < 0)
                kfree(msg);
        return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
                               u32 off, u32 len, bool ingress)
{
        int err = 0;

        if (!ingress) {
                if (!sock_writeable(psock->sk))
                        return -EAGAIN;
                return skb_send_sock(psock->sk, skb, off, len);
        }
        skb_get(skb);
        err = sk_psock_skb_ingress(psock, skb, off, len);
        if (err < 0)
                kfree_skb(skb);
        return err;
}

static void sk_psock_skb_state(struct sk_psock *psock,
                               struct sk_psock_work_state *state,
                               int len, int off)
{
        spin_lock_bh(&psock->ingress_lock);
        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                state->len = len;
                state->off = off;
        }
        spin_unlock_bh(&psock->ingress_lock);
}

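/* Backlog work: drain psock->ingress_skb, either transmitting each skb
 * (egress) or re-queueing it as ingress data. On -EAGAIN the current
 * position is saved in psock->work_state and the work is rescheduled;
 * any other error reports it and breaks the pipe.
 */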
static void sk_psock_backlog(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb = NULL;
        u32 len = 0, off = 0;
        bool ingress;
        int ret;

        mutex_lock(&psock->work_mutex);
        if (unlikely(state->len)) {
                len = state->len;
                off = state->off;
        }

        while ((skb = skb_peek(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
                if (skb_bpf_strparser(skb)) {
                        struct strp_msg *stm = strp_msg(skb);

                        off = stm->offset;
                        len = stm->full_len;
                }
                ingress = skb_bpf_ingress(skb);
                skb_bpf_redirect_clear(skb);
                do {
                        ret = -EIO;
                        if (!sock_flag(psock->sk, SOCK_DEAD))
                                ret = sk_psock_handle_skb(psock, skb, off,
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        sk_psock_skb_state(psock, state, len, off);

                                        /* Delay slightly to prioritize any
                                         * other work that might be here.
                                         */
                                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                                                schedule_delayed_work(&psock->work, 1);
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);

                skb = skb_dequeue(&psock->ingress_skb);
                kfree_skb(skb);
        }
end:
        mutex_unlock(&psock->work_mutex);
}

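/* Allocate a psock for @sk and attach it through sk_user_data. Fails
 * with -EINVAL when a ULP already controls the socket, -EBUSY when
 * sk_user_data is already in use, and -ENOMEM on allocation failure.
 * The original proto callbacks are saved so sk_psock_drop() can restore
 * them later.
 */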
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
        struct sk_psock *psock;
        struct proto *prot;

        write_lock_bh(&sk->sk_callback_lock);

        if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
                psock = ERR_PTR(-EINVAL);
                goto out;
        }

        if (sk->sk_user_data) {
                psock = ERR_PTR(-EBUSY);
                goto out;
        }

        psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
        if (!psock) {
                psock = ERR_PTR(-ENOMEM);
                goto out;
        }

        prot = READ_ONCE(sk->sk_prot);
        psock->sk = sk;
        psock->eval = __SK_NONE;
        psock->sk_proto = prot;
        psock->saved_unhash = prot->unhash;
        psock->saved_destroy = prot->destroy;
        psock->saved_close = prot->close;
        psock->saved_write_space = sk->sk_write_space;

        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);

        INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
        mutex_init(&psock->work_mutex);
        INIT_LIST_HEAD(&psock->ingress_msg);
        spin_lock_init(&psock->ingress_lock);
        skb_queue_head_init(&psock->ingress_skb);

        sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
        refcount_set(&psock->refcnt, 1);

        __rcu_assign_sk_user_data_with_flags(sk, psock,
                                             SK_USER_DATA_NOCOPY |
                                             SK_USER_DATA_PSOCK);
        sock_hold(sk);

out:
        write_unlock_bh(&sk->sk_callback_lock);
        return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
        struct sk_psock_link *link;

        spin_lock_bh(&psock->link_lock);
        link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
                                        list);
        if (link)
                list_del(&link->list);
        spin_unlock_bh(&psock->link_lock);
        return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
        struct sk_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
                list_del(&msg->list);
                sk_msg_free(psock->sk, msg);
                kfree(msg);
        }
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
                skb_bpf_redirect_clear(skb);
                sock_drop(psock->sk, skb);
        }
        __sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
        struct sk_psock_link *link, *tmp;

        list_for_each_entry_safe(link, tmp, &psock->link, list) {
                list_del(&link->list);
                sk_psock_free_link(link);
        }
}

void sk_psock_stop(struct sk_psock *psock)
{
        spin_lock_bh(&psock->ingress_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
        sk_psock_cork_free(psock);
        spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
        struct sk_psock *psock = container_of(to_rcu_work(work),
                                              struct sk_psock, rwork);
        /* No sk_callback_lock since already detached. */

        sk_psock_done_strp(psock);

        cancel_delayed_work_sync(&psock->work);
        __sk_psock_zap_ingress(psock);
        mutex_destroy(&psock->work_mutex);

        psock_progs_drop(&psock->progs);

        sk_psock_link_destroy(psock);
        sk_psock_cork_free(psock);

        if (psock->sk_redir)
                sock_put(psock->sk_redir);
        if (psock->sk_pair)
                sock_put(psock->sk_pair);
        sock_put(psock->sk);
        kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
        if (psock->progs.stream_parser)
                sk_psock_stop_strp(sk, psock);
        else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
                sk_psock_stop_verdict(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);

        sk_psock_stop(psock);

        INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
        queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

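/* Map the SK_PASS/SK_DROP verdict returned by a BPF program onto the
 * internal __SK_* action codes; SK_PASS with a pending redirect target
 * becomes __SK_REDIRECT, anything unrecognized is dropped.
 */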
static int sk_psock_map_verd(int verdict, bool redir)
{
        switch (verdict) {
        case SK_PASS:
                return redir ? __SK_REDIRECT : __SK_PASS;
        case SK_DROP:
        default:
                break;
        }

        return __SK_DROP;
}

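/* Run the msg_parser BPF program on @msg. On __SK_REDIRECT the target
 * socket chosen by the program is latched onto psock->sk_redir (with a
 * reference held) for the caller to use; a redirect verdict without a
 * target socket is downgraded to __SK_DROP.
 */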
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg)
{
        struct bpf_prog *prog;
        int ret;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.msg_parser);
        if (unlikely(!prog)) {
                ret = __SK_PASS;
                goto out;
        }

        sk_msg_compute_data_pointers(msg);
        msg->sk = sk;
        ret = bpf_prog_run_pin_on_cpu(prog, msg);
        ret = sk_psock_map_verd(ret, msg->sk_redir);
        psock->apply_bytes = msg->apply_bytes;
        if (ret == __SK_REDIRECT) {
                if (psock->sk_redir) {
                        sock_put(psock->sk_redir);
                        psock->sk_redir = NULL;
                }
                if (!msg->sk_redir) {
                        ret = __SK_DROP;
                        goto out;
                }
                psock->redir_ingress = sk_msg_to_ingress(msg);
                psock->sk_redir = msg->sk_redir;
                sock_hold(psock->sk_redir);
        }
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

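/* Hand @skb to the ingress queue of the socket selected by a BPF
 * redirect verdict and kick that psock's backlog work. Drops the skb
 * and returns -EIO when no target was set or the target is being torn
 * down.
 */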
static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
        struct sk_psock *psock_other;
        struct sock *sk_other;

        sk_other = skb_bpf_redirect_fetch(skb);
        /* This error indicates a buggy BPF program: it returned a redirect
         * verdict, but then didn't set a redirect socket.
         */
        if (unlikely(!sk_other)) {
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }
        psock_other = sk_psock(sk_other);
        /* This error indicates the socket is being torn down or had another
         * error that caused the pipe to break. We can't send a packet on
         * a socket that is in this state so we drop the skb.
         */
        if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }
        spin_lock_bh(&psock_other->ingress_lock);
        if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
                spin_unlock_bh(&psock_other->ingress_lock);
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }

        skb_queue_tail(&psock_other->ingress_skb, skb);
        schedule_delayed_work(&psock_other->work, 0);
        spin_unlock_bh(&psock_other->ingress_lock);
        return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
                                       struct sk_psock *from, int verdict)
{
        switch (verdict) {
        case __SK_REDIRECT:
                sk_psock_skb_redirect(from, skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
        default:
                break;
        }
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog;
        int ret = __SK_PASS;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (likely(prog)) {
                skb->sk = psock->sk;
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_tls_verdict_apply(skb, psock, ret);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

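/* Act on the verdict for @skb: __SK_PASS queues it on our own ingress
 * (directly when the backlog is empty, via the workqueue otherwise so
 * data stays in order), __SK_REDIRECT hands it to another socket, and
 * everything else drops it.
 */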
static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
                                  int verdict)
{
        struct sock *sk_other;
        int err = 0;
        u32 len, off;

        switch (verdict) {
        case __SK_PASS:
                err = -EIO;
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
                    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        goto out_free;

                skb_bpf_set_ingress(skb);

                /* If the queue is empty then we can submit directly
                 * into the msg queue. If it's not empty we have to
                 * queue work, otherwise we may get OOO data. Any
                 * errors from sk_psock_skb_ingress are handled by
                 * retrying later from the workqueue.
                 */
                if (skb_queue_empty(&psock->ingress_skb)) {
                        len = skb->len;
                        off = 0;
                        if (skb_bpf_strparser(skb)) {
                                struct strp_msg *stm = strp_msg(skb);

                                off = stm->offset;
                                len = stm->full_len;
                        }
                        err = sk_psock_skb_ingress_self(psock, skb, off, len);
                }
                if (err < 0) {
                        spin_lock_bh(&psock->ingress_lock);
                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                                skb_queue_tail(&psock->ingress_skb, skb);
                                schedule_delayed_work(&psock->work, 0);
                                err = 0;
                        }
                        spin_unlock_bh(&psock->ingress_lock);
                        if (err < 0)
                                goto out_free;
                }
                break;
        case __SK_REDIRECT:
                tcp_eat_skb(psock->sk, skb);
                err = sk_psock_skb_redirect(psock, skb);
                break;
        case __SK_DROP:
        default:
out_free:
                skb_bpf_redirect_clear(skb);
                tcp_eat_skb(psock->sk, skb);
                sock_drop(psock->sk, skb);
        }

        return err;
}

static void sk_psock_write_space(struct sock *sk)
{
        struct sk_psock *psock;
        void (*write_space)(struct sock *sk) = NULL;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        schedule_delayed_work(&psock->work, 0);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
        if (write_space)
                write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
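/* strparser rcv_msg callback: run the stream_verdict program on each
 * parsed message and apply the result. Without a psock the skb is
 * simply dropped.
 */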
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        struct sock *sk;

        rcu_read_lock();
        sk = strp->sk;
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                sock_drop(sk, skb);
                goto out;
        }
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (likely(prog)) {
                skb->sk = sk;
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                skb_bpf_set_strparser(skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
        return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
        struct bpf_prog *prog;
        int ret = skb->len;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.stream_parser);
        if (likely(prog)) {
                skb->sk = psock->sk;
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                skb->sk = NULL;
        }
        rcu_read_unlock();
        return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (tls_sw_has_ctx_rx(sk)) {
                        psock->saved_data_ready(sk);
                } else {
                        write_lock_bh(&sk->sk_callback_lock);
                        strp_data_ready(&psock->strp);
                        write_unlock_bh(&sk->sk_callback_lock);
                }
        }
        rcu_read_unlock();
}

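/* Hook the strparser up to @sk. sk_psock_start_strp() and
 * sk_psock_stop_strp() below swap sk->sk_data_ready so incoming data is
 * fed through the parser, and restore the original callback on
 * teardown.
 */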
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        int ret;

        static const struct strp_callbacks cb = {
                .rcv_msg        = sk_psock_strp_read,
                .read_sock_done = sk_psock_strp_read_done,
                .parse_msg      = sk_psock_strp_parse,
        };

        ret = strp_init(&psock->strp, sk, &cb);
        if (!ret)
                sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);

        return ret;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                return;

        psock->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_strp_data_ready;
        sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
        psock_set_prog(&psock->progs.stream_parser, NULL);

        if (!psock->saved_data_ready)
                return;

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
        strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
        /* Parser has been stopped */
        if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
                strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

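/* read_skb callback used when no stream parser is attached: run the
 * stream/skb verdict program directly on each skb and apply the result.
 * Returns the number of bytes consumed or a negative error.
 */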
static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        int len = skb->len;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                len = 0;
                tcp_eat_skb(sk, skb);
                sock_drop(sk, skb);
                goto out;
        }
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (!prog)
                prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
        }
        ret = sk_psock_verdict_apply(psock, skb, ret);
        if (ret < 0)
                len = ret;
out:
        rcu_read_unlock();
        return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;
        int copied;

        if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
                return;
        copied = sock->ops->read_skb(sk, sk_psock_verdict_recv);
        if (copied >= 0) {
                struct sk_psock *psock;

                rcu_read_lock();
                psock = sk_psock(sk);
                if (psock) {
                        read_lock_bh(&sk->sk_callback_lock);
                        sk_psock_data_ready(sk, psock);
                        read_unlock_bh(&sk->sk_callback_lock);
                }
                rcu_read_unlock();
        }
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                return;

        psock->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_verdict_data_ready;
        sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
        psock_set_prog(&psock->progs.stream_verdict, NULL);
        psock_set_prog(&psock->progs.skb_verdict, NULL);

        if (!psock->saved_data_ready)
                return;

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
}