net/core/skmsg.c (GNU Linux-libre 5.13.14-gnu1)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);
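
/* Illustrative sketch, not part of the original file: grow @msg so it can
 * hold @copy more bytes, in the style of the tcp_bpf sendmsg path. The
 * msg->sg.end - 1 coalesce index is an assumption borrowed from that path
 * (extend the most recently filled element in place when possible);
 * example_msg_grow() is a hypothetical name.
 */
static int __maybe_unused example_msg_grow(struct sock *sk,
					   struct sk_msg *msg, int copy)
{
	int err;

	err = sk_msg_alloc(sk, msg, msg->sg.size + copy, msg->sg.end - 1);
	if (err == -ENOSPC) {
		/* The ring is full (MAX_MSG_FRAGS elements); the caller
		 * should flush what is already queued and then retry.
		 */
	}
	return err;
}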

int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);
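
/* Hypothetical usage sketch (not in the original file): share the first
 * @len bytes of @src with @dst, roughly as the tls_sw path does when
 * building an encrypted copy of a plaintext record. No data is copied;
 * memory is charged to @sk inside sk_msg_clone().
 */
static int __maybe_unused example_clone_head(struct sock *sk,
					     struct sk_msg *dst,
					     struct sk_msg *src, u32 len)
{
	sk_msg_init(dst);
	return sk_msg_clone(sk, dst, src, 0, len);
}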

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb() path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);
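
/* Sketch mirroring __sk_psock_purge_ingress_msg() later in this file:
 * release a heap-allocated message whose pages were charged to @sk. The
 * _nocharge variant is for memory that was never charged to this socket.
 * example_msg_drop() is a hypothetical helper, not part of the file.
 */
static void __maybe_unused example_msg_drop(struct sock *sk,
					    struct sk_msg *msg)
{
	sk_msg_free(sk, msg);
	kfree(msg);
}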

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of the last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);
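
/* Sketch of the allocate/copy/rollback pattern (assumption: condensed
 * from the tcp_bpf sendmsg path, error handling simplified). If the copy
 * fails, sk_msg_trim() drops the growth back to the recorded size so no
 * partially filled elements leak. example_append() is hypothetical.
 */
static int __maybe_unused example_append(struct sock *sk, struct sk_msg *msg,
					 struct iov_iter *from, u32 bytes)
{
	u32 osize = msg->sg.size;
	int err;

	err = sk_msg_alloc(sk, msg, osize + bytes, msg->sg.end - 1);
	if (err)
		return err;

	err = sk_msg_memcopy_from_iter(sk, from, msg, bytes);
	if (err < 0)
		sk_msg_trim(sk, msg, osize);
	return err;
}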

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and
		 * prefer the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert the iov_iter updates; the msg will need a 'trim' later if
	 * it also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
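
/* Sketch of the zerocopy-with-cleanup pattern used by callers such as
 * tls_sw_sendmsg() (assumption: the real caller differs in detail). The
 * error path above already reverts the iov_iter, but the message may
 * still hold partially linked pages, so trim before the caller falls
 * back to a regular copy. example_zerocopy() is hypothetical.
 */
static int __maybe_unused example_zerocopy(struct sock *sk, struct sk_msg *msg,
					   struct iov_iter *from, u32 bytes)
{
	u32 osize = msg->sg.size;
	int err;

	err = sk_msg_zerocopy_from_iter(sk, from, msg, bytes);
	if (err)
		sk_msg_trim(sk, msg, osize);
	return err;
}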

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
		     long timeo, int *err)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_wait_data);
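
/* Sketch of the receive-side wait (assumption: simplified from the
 * tcp_bpf recvmsg path; -EAGAIN handling is a guess at caller policy).
 * Sleeps until the psock ingress list or the socket receive queue has
 * data, or until @timeo expires. example_wait() is hypothetical.
 */
static int __maybe_unused example_wait(struct sock *sk, struct sk_psock *psock,
				       int flags)
{
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	int err = 0;
	int data;

	data = sk_msg_wait_data(sk, psock, flags, timeo, &err);
	if (!data && !err)
		err = -EAGAIN;	/* timed out with nothing queued */
	return err;
}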

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
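
/* Assumed caller shape, a much simplified sketch of the tcp_bpf recvmsg
 * path: copy queued ingress data to the user iterator under the socket
 * lock. Real callers also wait for data and fall back to the stock
 * recvmsg when the psock queue is empty. Not part of the original file.
 */
static int __maybe_unused example_recvmsg(struct sock *sk,
					  struct sk_psock *psock,
					  struct msghdr *msg, int len,
					  int flags)
{
	int copied;

	lock_sock(sk);
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	release_sock(sk);
	return copied;
}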

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0))
		return num_sge;

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * so skip memory accounting and the owner transition since they are
	 * already set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}

static void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       struct sk_buff *skb,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->skb = skb;
		state->len = len;
		state->off = off;
	} else {
		sock_drop(psock->sk, skb);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->skb)) {
		spin_lock_bh(&psock->ingress_lock);
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		spin_unlock_bh(&psock->ingress_lock);
	}
	if (skb)
		goto start;

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, skb,
							   len, off);
					goto end;
				}
				/* Hard errors break the pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);
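
/* Sketch of how a map update path obtains a psock (assumption: modeled
 * on the sock_map link path, with all the prog-attach detail elided).
 * sk_psock_init() fails with -EBUSY when sk_user_data is already taken.
 * example_psock_get() is a hypothetical name.
 */
static struct sk_psock *__maybe_unused example_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	psock = sk_psock_init(sk, NUMA_NO_NODE);
	if (IS_ERR(psock))
		return psock;	/* -EBUSY or -ENOMEM */

	/* Holds a reference; drop it with sk_psock_put() when done. */
	return psock;
}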

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
	/* We null the skb here to ensure that calls to sk_psock_backlog
	 * do not pick up the freed skb.
	 */
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock, false);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);
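
/* Sketch of the usual teardown pairing (assumption: callers normally go
 * through sk_psock_put() from linux/skmsg.h, which drops the refcount
 * and invokes sk_psock_drop() on the final put; the free itself is then
 * deferred through the RCU work item above).
 */
static void __maybe_unused example_psock_put(struct sock *sk,
					     struct sk_psock *psock)
{
	sk_psock_put(sk, psock);
}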

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
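
/* Sketch of acting on the verdict (assumption: a heavily condensed
 * version of the tcp_bpf send-verdict logic; real callers also handle
 * apply_bytes and cork state, and the -EACCES policy is borrowed from
 * that path). example_apply_verdict() is hypothetical.
 */
static int __maybe_unused example_apply_verdict(struct sock *sk,
						struct sk_psock *psock,
						struct sk_msg *msg)
{
	switch (sk_psock_msg_verdict(sk, psock, msg)) {
	case __SK_PASS:
		return 0;	/* deliver on the original path */
	case __SK_REDIRECT:
		return 1;	/* send toward psock->sk_redir */
	case __SK_DROP:
	default:
		sk_msg_free(sk, msg);
		return -EACCES;
	}
}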

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Any
		 * error from sk_psock_skb_ingress is handled by
		 * retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
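
/* Illustrative sketch, not part of the original file (assumption:
 * condensed from the sockmap update path): wiring a psock to the stream
 * parser. sk_psock_init_strp() must succeed before sk_psock_start_strp()
 * swaps in the data_ready callback under the callback lock.
 */
static int __maybe_unused example_enable_strp(struct sock *sk,
					      struct sk_psock *psock)
{
	int ret;

	ret = sk_psock_init_strp(sk, psock);
	if (ret)
		return ret;

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_start_strp(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);
	return 0;
}
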
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* Clone here so sk_eat_skb() in tcp_read_sock() does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
		len = 0;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}