GNU Linux-libre 4.19.295-gnu1
net/kcm/kcmsock.c
/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/sched/signal.h>

#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>

unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;

static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
        return (struct kcm_sock *)sk;
}

static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
        return (struct kcm_tx_msg *)skb->cb;
}

static void report_csk_error(struct sock *csk, int err)
{
        csk->sk_err = EPIPE;
        csk->sk_error_report(csk);
}

static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
                               bool wakeup_kcm)
{
        struct sock *csk = psock->sk;
        struct kcm_mux *mux = psock->mux;

        /* Unrecoverable error in transmit */

        spin_lock_bh(&mux->lock);

        if (psock->tx_stopped) {
                spin_unlock_bh(&mux->lock);
                return;
        }

        psock->tx_stopped = 1;
        KCM_STATS_INCR(psock->stats.tx_aborts);

        if (!psock->tx_kcm) {
                /* Take off psocks_avail list */
                list_del(&psock->psock_avail_list);
        } else if (wakeup_kcm) {
                /* In this case psock is being aborted while outside of
                 * write_msgs and psock is reserved. Schedule tx_work
                 * to handle the failure there. Need to commit tx_stopped
                 * before queuing work.
                 */
                smp_mb();

                queue_work(kcm_wq, &psock->tx_kcm->tx_work);
        }

        spin_unlock_bh(&mux->lock);

        /* Report error on lower socket */
        report_csk_error(csk, err);
}

/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
                                    struct kcm_psock *psock)
{
        STRP_STATS_ADD(mux->stats.rx_bytes,
                       psock->strp.stats.bytes -
                       psock->saved_rx_bytes);
        mux->stats.rx_msgs +=
                psock->strp.stats.msgs - psock->saved_rx_msgs;
        psock->saved_rx_msgs = psock->strp.stats.msgs;
        psock->saved_rx_bytes = psock->strp.stats.bytes;
}

static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
                                    struct kcm_psock *psock)
{
        KCM_STATS_ADD(mux->stats.tx_bytes,
                      psock->stats.tx_bytes - psock->saved_tx_bytes);
        mux->stats.tx_msgs +=
                psock->stats.tx_msgs - psock->saved_tx_msgs;
        psock->saved_tx_msgs = psock->stats.tx_msgs;
        psock->saved_tx_bytes = psock->stats.tx_bytes;
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* KCM is ready to receive messages on its queue-- either the KCM is new or
 * has become unblocked after being blocked on full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;
        struct kcm_psock *psock;
        struct sk_buff *skb;

        if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
                return;

        while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
                if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                        /* Assuming buffer limit has been reached */
                        skb_queue_head(&mux->rx_hold_queue, skb);
                        WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
                        return;
                }
        }

        while (!list_empty(&mux->psocks_ready)) {
                psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
                                         psock_ready_list);

                if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
                        /* Assuming buffer limit has been reached */
                        WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
                        return;
                }

                /* Consumed the ready message on the psock. Schedule rx_work to
                 * get more messages.
                 */
                list_del(&psock->psock_ready_list);
                psock->ready_rx_msg = NULL;
                /* Commit clearing of ready_rx_msg for queuing work */
                smp_mb();

                strp_unpause(&psock->strp);
                strp_check_rcv(&psock->strp);
        }

        /* Buffer limit is okay now, add to ready list */
        list_add_tail(&kcm->wait_rx_list,
                      &kcm->mux->kcm_rx_waiters);
        /* paired with lockless reads in kcm_rfree() */
        WRITE_ONCE(kcm->rx_wait, true);
}

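/* Destructor for message skbs queued on a KCM socket's receive queue.
 * Uncharges the skb from the socket's receive memory and, once the
 * socket has drained below sk_rcvlowat, rechecks receive readiness.
 */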
static void kcm_rfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct kcm_mux *mux = kcm->mux;
        unsigned int len = skb->truesize;

        sk_mem_uncharge(sk, len);
        atomic_sub(len, &sk->sk_rmem_alloc);

        /* For reading rx_wait and rx_psock without holding lock */
        smp_mb__after_atomic();

        if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
            sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
                spin_lock_bh(&mux->rx_lock);
                kcm_rcv_ready(kcm);
                spin_unlock_bh(&mux->rx_lock);
        }
}

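/* Queue a message skb on a KCM socket's receive queue, charging it
 * against the socket's receive buffer. Fails with -ENOMEM or -ENOBUFS
 * when buffer limits would be exceeded.
 */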
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff_head *list = &sk->sk_receive_queue;

        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                return -ENOMEM;

        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return -ENOBUFS;

        skb->dev = NULL;

        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = kcm_rfree;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);

        skb_queue_tail(list, skb);

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk);

        return 0;
}

/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
        struct sk_buff *skb;
        struct kcm_sock *kcm;

        while ((skb = skb_dequeue(head))) {
                /* Reset destructor to avoid calling kcm_rcv_ready */
                skb->destructor = sock_rfree;
                skb_orphan(skb);
try_again:
                if (list_empty(&mux->kcm_rx_waiters)) {
                        skb_queue_tail(&mux->rx_hold_queue, skb);
                        continue;
                }

                kcm = list_first_entry(&mux->kcm_rx_waiters,
                                       struct kcm_sock, wait_rx_list);

                if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                        /* Should mean socket buffer full */
                        list_del(&kcm->wait_rx_list);
                        /* paired with lockless reads in kcm_rfree() */
                        WRITE_ONCE(kcm->rx_wait, false);

                        /* Commit rx_wait to read in kcm_free */
                        smp_wmb();

                        goto try_again;
                }
        }
}

/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
                                       struct sk_buff *head)
{
        struct kcm_mux *mux = psock->mux;
        struct kcm_sock *kcm;

        WARN_ON(psock->ready_rx_msg);

        if (psock->rx_kcm)
                return psock->rx_kcm;

        spin_lock_bh(&mux->rx_lock);

        if (psock->rx_kcm) {
                spin_unlock_bh(&mux->rx_lock);
                return psock->rx_kcm;
        }

        kcm_update_rx_mux_stats(mux, psock);

        if (list_empty(&mux->kcm_rx_waiters)) {
                psock->ready_rx_msg = head;
                strp_pause(&psock->strp);
                list_add_tail(&psock->psock_ready_list,
                              &mux->psocks_ready);
                spin_unlock_bh(&mux->rx_lock);
                return NULL;
        }

        kcm = list_first_entry(&mux->kcm_rx_waiters,
                               struct kcm_sock, wait_rx_list);
        list_del(&kcm->wait_rx_list);
        /* paired with lockless reads in kcm_rfree() */
        WRITE_ONCE(kcm->rx_wait, false);

        psock->rx_kcm = kcm;
        /* paired with lockless reads in kcm_rfree() */
        WRITE_ONCE(kcm->rx_psock, psock);

        spin_unlock_bh(&mux->rx_lock);

        return kcm;
}

static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
        kcm_done(container_of(w, struct kcm_sock, done_work));
}

/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
                             bool rcv_ready)
{
        struct kcm_sock *kcm = psock->rx_kcm;
        struct kcm_mux *mux = psock->mux;

        if (!kcm)
                return;

        spin_lock_bh(&mux->rx_lock);

        psock->rx_kcm = NULL;
        /* paired with lockless reads in kcm_rfree() */
        WRITE_ONCE(kcm->rx_psock, NULL);

        /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
         * kcm_rfree
         */
        smp_mb();

        if (unlikely(kcm->done)) {
                spin_unlock_bh(&mux->rx_lock);

                /* Need to run kcm_done in a task since we need to acquire
                 * callback locks which may already be held here.
                 */
                INIT_WORK(&kcm->done_work, kcm_done_work);
                schedule_work(&kcm->done_work);
                return;
        }

        if (unlikely(kcm->rx_disabled)) {
                requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
        } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
                /* Check for degenerative race with rx_wait that all
                 * data was dequeued (accounted for in kcm_rfree).
                 */
                kcm_rcv_ready(kcm);
        }
        spin_unlock_bh(&mux->rx_lock);
}

/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
        struct kcm_psock *psock;

        read_lock_bh(&sk->sk_callback_lock);

        psock = (struct kcm_psock *)sk->sk_user_data;
        if (likely(psock))
                strp_data_ready(&psock->strp);

        read_unlock_bh(&sk->sk_callback_lock);
}

/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
        struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
        struct kcm_sock *kcm;

try_queue:
        kcm = reserve_rx_kcm(psock, skb);
        if (!kcm) {
                /* Unable to reserve a KCM, message is held in psock and strp
                 * is paused.
                 */
                return;
        }

        if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                /* Should mean socket buffer full */
                unreserve_rx_kcm(psock, false);
                goto try_queue;
        }
}

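/* strparser parse_msg callback: run the attached BPF program to determine
 * the length of the next message in the stream.
 */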
static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
        struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
        struct bpf_prog *prog = psock->bpf_prog;

        return (*prog->bpf_func)(skb, prog->insnsi);
}

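/* strparser read_sock_done callback: receive processing on the lower
 * socket has finished, so release any KCM socket reserved for receive.
 */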
static int kcm_read_sock_done(struct strparser *strp, int err)
{
        struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

        unreserve_rx_kcm(psock, true);

        return err;
}

static void psock_state_change(struct sock *sk)
{
        /* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
         * since applications will normally not poll with EPOLLIN
         * on the TCP sockets.
         */

        report_csk_error(sk, EPIPE);
}

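/* Write space callback for the lower socket: if a KCM socket is reserved
 * on this psock for transmit, schedule its tx_work to resume sending.
 */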
static void psock_write_space(struct sock *sk)
{
        struct kcm_psock *psock;
        struct kcm_mux *mux;
        struct kcm_sock *kcm;

        read_lock_bh(&sk->sk_callback_lock);

        psock = (struct kcm_psock *)sk->sk_user_data;
        if (unlikely(!psock))
                goto out;
        mux = psock->mux;

        spin_lock_bh(&mux->lock);

        /* Check if the psock is reserved, meaning a KCM socket is waiting
         * to send on it.
         */
        kcm = psock->tx_kcm;
        if (kcm && !unlikely(kcm->tx_stopped))
                queue_work(kcm_wq, &kcm->tx_work);

        spin_unlock_bh(&mux->lock);
out:
        read_unlock_bh(&sk->sk_callback_lock);
}

static void unreserve_psock(struct kcm_sock *kcm);

/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;
        struct kcm_psock *psock;

        psock = kcm->tx_psock;

        smp_rmb(); /* Must read tx_psock before tx_wait */

        if (psock) {
                WARN_ON(kcm->tx_wait);
                if (unlikely(psock->tx_stopped))
                        unreserve_psock(kcm);
                else
                        return kcm->tx_psock;
        }

        spin_lock_bh(&mux->lock);

        /* Check again under lock to see if a psock was reserved for this
         * kcm socket via psock_now_avail (from unreserve_psock).
         */
        psock = kcm->tx_psock;
        if (unlikely(psock)) {
                WARN_ON(kcm->tx_wait);
                spin_unlock_bh(&mux->lock);
                return kcm->tx_psock;
        }

        if (!list_empty(&mux->psocks_avail)) {
                psock = list_first_entry(&mux->psocks_avail,
                                         struct kcm_psock,
                                         psock_avail_list);
                list_del(&psock->psock_avail_list);
                if (kcm->tx_wait) {
                        list_del(&kcm->wait_psock_list);
                        kcm->tx_wait = false;
                }
                kcm->tx_psock = psock;
                psock->tx_kcm = kcm;
                KCM_STATS_INCR(psock->stats.reserved);
        } else if (!kcm->tx_wait) {
                list_add_tail(&kcm->wait_psock_list,
                              &mux->kcm_tx_waiters);
                kcm->tx_wait = true;
        }

        spin_unlock_bh(&mux->lock);

        return psock;
}

/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
        struct kcm_mux *mux = psock->mux;
        struct kcm_sock *kcm;

        if (list_empty(&mux->kcm_tx_waiters)) {
                list_add_tail(&psock->psock_avail_list,
                              &mux->psocks_avail);
        } else {
                kcm = list_first_entry(&mux->kcm_tx_waiters,
                                       struct kcm_sock,
                                       wait_psock_list);
                list_del(&kcm->wait_psock_list);
                kcm->tx_wait = false;
                psock->tx_kcm = kcm;

                /* Commit before changing tx_psock since that is read in
                 * reserve_psock before queuing work.
                 */
                smp_mb();

                kcm->tx_psock = psock;
                KCM_STATS_INCR(psock->stats.reserved);
                queue_work(kcm_wq, &kcm->tx_work);
        }
}

/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
        struct kcm_psock *psock;
        struct kcm_mux *mux = kcm->mux;

        spin_lock_bh(&mux->lock);

        psock = kcm->tx_psock;

        if (WARN_ON(!psock)) {
                spin_unlock_bh(&mux->lock);
                return;
        }

        smp_rmb(); /* Read tx_psock before tx_wait */

        kcm_update_tx_mux_stats(mux, psock);

        WARN_ON(kcm->tx_wait);

        kcm->tx_psock = NULL;
        psock->tx_kcm = NULL;
        KCM_STATS_INCR(psock->stats.unreserved);

        if (unlikely(psock->tx_stopped)) {
                if (psock->done) {
                        /* Deferred free */
                        list_del(&psock->psock_list);
                        mux->psocks_cnt--;
                        sock_put(psock->sk);
                        fput(psock->sk->sk_socket->file);
                        kmem_cache_free(kcm_psockp, psock);
                }

                /* Don't put back on available list */

                spin_unlock_bh(&mux->lock);

                return;
        }

        psock_now_avail(psock);

        spin_unlock_bh(&mux->lock);
}

static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        spin_lock_bh(&mux->lock);
        KCM_STATS_INCR(mux->stats.tx_retries);
        spin_unlock_bh(&mux->lock);
}

/* Write any messages ready on the kcm socket.  Called with kcm sock lock
 * held.  Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
        struct sock *sk = &kcm->sk;
        struct kcm_psock *psock;
        struct sk_buff *skb, *head;
        struct kcm_tx_msg *txm;
        unsigned short fragidx, frag_offset;
        unsigned int sent, total_sent = 0;
        int ret = 0;

        kcm->tx_wait_more = false;
        psock = kcm->tx_psock;
        if (unlikely(psock && psock->tx_stopped)) {
                /* A reserved psock was aborted asynchronously. Unreserve
                 * it and we'll retry the message.
                 */
                unreserve_psock(kcm);
                kcm_report_tx_retry(kcm);
                if (skb_queue_empty(&sk->sk_write_queue))
                        return 0;

                kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

        } else if (skb_queue_empty(&sk->sk_write_queue)) {
                return 0;
        }

        head = skb_peek(&sk->sk_write_queue);
        txm = kcm_tx_msg(head);

        if (txm->sent) {
                /* Send of first skbuff in queue already in progress */
                if (WARN_ON(!psock)) {
                        ret = -EINVAL;
                        goto out;
                }
                sent = txm->sent;
                frag_offset = txm->frag_offset;
                fragidx = txm->fragidx;
                skb = txm->frag_skb;

                goto do_frag;
        }

try_again:
        psock = reserve_psock(kcm);
        if (!psock)
                goto out;

        do {
                skb = head;
                txm = kcm_tx_msg(head);
                sent = 0;

do_frag_list:
                if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
                        ret = -EINVAL;
                        goto out;
                }

                for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
                     fragidx++) {
                        skb_frag_t *frag;

                        frag_offset = 0;
do_frag:
                        frag = &skb_shinfo(skb)->frags[fragidx];
                        if (WARN_ON(!frag->size)) {
                                ret = -EINVAL;
                                goto out;
                        }

                        ret = kernel_sendpage(psock->sk->sk_socket,
                                              frag->page.p,
                                              frag->page_offset + frag_offset,
                                              frag->size - frag_offset,
                                              MSG_DONTWAIT);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        /* Save state to try again when there's
                                         * write space on the socket
                                         */
                                        txm->sent = sent;
                                        txm->frag_offset = frag_offset;
                                        txm->fragidx = fragidx;
                                        txm->frag_skb = skb;

                                        ret = 0;
                                        goto out;
                                }

                                /* Hard failure in sending message, abort this
                                 * psock since it has lost framing
                                 * synchronization and retry sending the
                                 * message from the beginning.
                                 */
                                kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
                                                   true);
                                unreserve_psock(kcm);

                                txm->sent = 0;
                                kcm_report_tx_retry(kcm);
                                ret = 0;

                                goto try_again;
                        }

                        sent += ret;
                        frag_offset += ret;
                        KCM_STATS_ADD(psock->stats.tx_bytes, ret);
                        if (frag_offset < frag->size) {
                                /* Not finished with this frag */
                                goto do_frag;
                        }
                }

                if (skb == head) {
                        if (skb_has_frag_list(skb)) {
                                skb = skb_shinfo(skb)->frag_list;
                                goto do_frag_list;
                        }
                } else if (skb->next) {
                        skb = skb->next;
                        goto do_frag_list;
                }

                /* Successfully sent the whole packet, account for it. */
                skb_dequeue(&sk->sk_write_queue);
                kfree_skb(head);
                sk->sk_wmem_queued -= sent;
                total_sent += sent;
                KCM_STATS_INCR(psock->stats.tx_msgs);
        } while ((head = skb_peek(&sk->sk_write_queue)));
out:
        if (!head) {
                /* Done with all queued messages. */
                WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
                unreserve_psock(kcm);
        }

        /* Check if write space is available */
        sk->sk_write_space(sk);

        return total_sent ? : ret;
}

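/* Work function to write queued messages for a KCM socket, used for
 * deferred transmission and to handle asynchronous tx aborts.
 */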
static void kcm_tx_work(struct work_struct *w)
{
        struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
        struct sock *sk = &kcm->sk;
        int err;

        lock_sock(sk);

        /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
         * aborts
         */
        err = kcm_write_msgs(kcm);
        if (err < 0) {
                /* Hard failure in write, report error on KCM socket */
                pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
                report_csk_error(&kcm->sk, -err);
                goto out;
        }

        /* Primarily for SOCK_SEQPACKET sockets */
        if (likely(sk->sk_socket) &&
            test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_space(sk);
        }

out:
        release_sock(sk);
}

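/* Flush any messages that were held back by MSG_BATCH. */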
static void kcm_push(struct kcm_sock *kcm)
{
        if (kcm->tx_wait_more)
                kcm_write_msgs(kcm);
}

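/* sendpage implementation: append the referenced page to the message
 * under construction. The message is completed and queued for
 * transmission once MSG_MORE is no longer set.
 */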
static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
                            int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct sk_buff *skb = NULL, *head = NULL;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        bool eor;
        int err = 0;
        int i;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        /* No MSG_EOR from splice, only look at MSG_MORE */
        eor = !(flags & MSG_MORE);

        lock_sock(sk);

        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        err = -EPIPE;
        if (sk->sk_err)
                goto out_error;

        if (kcm->seq_skb) {
                /* Previously opened message */
                head = kcm->seq_skb;
                skb = kcm_tx_msg(head)->last_skb;
                i = skb_shinfo(skb)->nr_frags;

                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
                        skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
                        goto coalesced;
                }

                if (i >= MAX_SKB_FRAGS) {
                        struct sk_buff *tskb;

                        tskb = alloc_skb(0, sk->sk_allocation);
                        while (!tskb) {
                                kcm_push(kcm);
                                err = sk_stream_wait_memory(sk, &timeo);
                                if (err)
                                        goto out_error;
                        }

                        if (head == skb)
                                skb_shinfo(head)->frag_list = tskb;
                        else
                                skb->next = tskb;

                        skb = tskb;
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        i = 0;
                }
        } else {
                /* Call the sk_stream functions to manage the sndbuf mem. */
                if (!sk_stream_memory_free(sk)) {
                        kcm_push(kcm);
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        err = sk_stream_wait_memory(sk, &timeo);
                        if (err)
                                goto out_error;
                }

                head = alloc_skb(0, sk->sk_allocation);
                while (!head) {
                        kcm_push(kcm);
                        err = sk_stream_wait_memory(sk, &timeo);
                        if (err)
                                goto out_error;
                }

                skb = head;
                i = 0;
        }

        get_page(page);
        skb_fill_page_desc(skb, i, page, offset, size);
        skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

coalesced:
        skb->len += size;
        skb->data_len += size;
        skb->truesize += size;
        sk->sk_wmem_queued += size;
        sk_mem_charge(sk, size);

        if (head != skb) {
                head->len += size;
                head->data_len += size;
                head->truesize += size;
        }

        if (eor) {
                bool not_busy = skb_queue_empty(&sk->sk_write_queue);

                /* Message complete, queue it on send buffer */
                __skb_queue_tail(&sk->sk_write_queue, head);
                kcm->seq_skb = NULL;
                KCM_STATS_INCR(kcm->stats.tx_msgs);

                if (flags & MSG_BATCH) {
                        kcm->tx_wait_more = true;
                } else if (kcm->tx_wait_more || not_busy) {
                        err = kcm_write_msgs(kcm);
                        if (err < 0) {
                                /* We got a hard error in write_msgs but have
                                 * already queued this message. Report an error
                                 * in the socket, but don't affect return value
                                 * from sendmsg
                                 */
                                pr_warn("KCM: Hard failure on kcm_write_msgs\n");
                                report_csk_error(&kcm->sk, -err);
                        }
                }
        } else {
                /* Message not complete, save state */
                kcm->seq_skb = head;
                kcm_tx_msg(head)->last_skb = skb;
        }

        KCM_STATS_ADD(kcm->stats.tx_bytes, size);

        release_sock(sk);
        return size;

out_error:
        kcm_push(kcm);

        err = sk_stream_error(sk, flags, err);

        /* make sure we wake any epoll edge trigger waiter */
        if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
                sk->sk_write_space(sk);

        release_sock(sk);
        return err;
}

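/* Copy data from userspace into the message under construction. For
 * SOCK_DGRAM a message is completed unless MSG_MORE is set; for
 * SOCK_SEQPACKET completion is signaled by MSG_EOR. Completed messages
 * are queued on the socket's write queue for transmission.
 */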
static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct sk_buff *skb = NULL, *head = NULL;
        size_t copy, copied = 0;
        long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        int eor = (sock->type == SOCK_DGRAM) ?
                  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
        int err = -EPIPE;

        lock_sock(sk);

        /* Per tcp_sendmsg this should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        if (sk->sk_err)
                goto out_error;

        if (kcm->seq_skb) {
                /* Previously opened message */
                head = kcm->seq_skb;
                skb = kcm_tx_msg(head)->last_skb;
                goto start;
        }

        /* Call the sk_stream functions to manage the sndbuf mem. */
        if (!sk_stream_memory_free(sk)) {
                kcm_push(kcm);
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                err = sk_stream_wait_memory(sk, &timeo);
                if (err)
                        goto out_error;
        }

        if (msg_data_left(msg)) {
                /* New message, alloc head skb */
                head = alloc_skb(0, sk->sk_allocation);
                while (!head) {
                        kcm_push(kcm);
                        err = sk_stream_wait_memory(sk, &timeo);
                        if (err)
                                goto out_error;

                        head = alloc_skb(0, sk->sk_allocation);
                }

                skb = head;

                /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
                 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
                 */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

start:
        while (msg_data_left(msg)) {
                bool merge = true;
                int i = skb_shinfo(skb)->nr_frags;
                struct page_frag *pfrag = sk_page_frag(sk);

                if (!sk_page_frag_refill(sk, pfrag))
                        goto wait_for_memory;

                if (!skb_can_coalesce(skb, i, pfrag->page,
                                      pfrag->offset)) {
                        if (i == MAX_SKB_FRAGS) {
                                struct sk_buff *tskb;

                                tskb = alloc_skb(0, sk->sk_allocation);
                                if (!tskb)
                                        goto wait_for_memory;

                                if (head == skb)
                                        skb_shinfo(head)->frag_list = tskb;
                                else
                                        skb->next = tskb;

                                skb = tskb;
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                                continue;
                        }
                        merge = false;
                }

                copy = min_t(int, msg_data_left(msg),
                             pfrag->size - pfrag->offset);

                if (!sk_wmem_schedule(sk, copy))
                        goto wait_for_memory;

                err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
                                               pfrag->page,
                                               pfrag->offset,
                                               copy);
                if (err)
                        goto out_error;

                /* Update the skb. */
                if (merge) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                } else {
                        skb_fill_page_desc(skb, i, pfrag->page,
                                           pfrag->offset, copy);
                        get_page(pfrag->page);
                }

                pfrag->offset += copy;
                copied += copy;
                if (head != skb) {
                        head->len += copy;
                        head->data_len += copy;
                }

                continue;

wait_for_memory:
                kcm_push(kcm);
                err = sk_stream_wait_memory(sk, &timeo);
                if (err)
                        goto out_error;
        }

        if (eor) {
                bool not_busy = skb_queue_empty(&sk->sk_write_queue);

                if (head) {
                        /* Message complete, queue it on send buffer */
                        __skb_queue_tail(&sk->sk_write_queue, head);
                        kcm->seq_skb = NULL;
                        KCM_STATS_INCR(kcm->stats.tx_msgs);
                }

                if (msg->msg_flags & MSG_BATCH) {
                        kcm->tx_wait_more = true;
                } else if (kcm->tx_wait_more || not_busy) {
                        err = kcm_write_msgs(kcm);
                        if (err < 0) {
                                /* We got a hard error in write_msgs but have
                                 * already queued this message. Report an error
                                 * in the socket, but don't affect return value
                                 * from sendmsg
                                 */
                                pr_warn("KCM: Hard failure on kcm_write_msgs\n");
                                report_csk_error(&kcm->sk, -err);
                        }
                }
        } else {
                /* Message not complete, save state */
partial_message:
                if (head) {
                        kcm->seq_skb = head;
                        kcm_tx_msg(head)->last_skb = skb;
                }
        }

        KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

        release_sock(sk);
        return copied;

out_error:
        kcm_push(kcm);

        if (sock->type == SOCK_SEQPACKET) {
                /* Wrote some bytes before encountering an
                 * error, return partial success.
                 */
                if (copied)
                        goto partial_message;
                if (head != kcm->seq_skb)
                        kfree_skb(head);
        } else {
                kfree_skb(head);
                kcm->seq_skb = NULL;
        }

        err = sk_stream_error(sk, msg->msg_flags, err);

        /* make sure we wake any epoll edge trigger waiter */
        if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
                sk->sk_write_space(sk);

        release_sock(sk);
        return err;
}

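/* Receive a message from a KCM socket. For SOCK_DGRAM a message that
 * does not fit in the buffer is truncated; for SOCK_SEQPACKET the
 * remainder is left for subsequent reads and MSG_EOR marks the read
 * that finishes a message.
 */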
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
                       size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        int err = 0;
        struct strp_msg *stm;
        int copied = 0;
        struct sk_buff *skb;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                goto out;

        /* Okay, have a message on the receive queue */

        stm = strp_msg(skb);

        if (len > stm->full_len)
                len = stm->full_len;

        err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
        if (err < 0)
                goto out;

        copied = len;
        if (likely(!(flags & MSG_PEEK))) {
                KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
                if (copied < stm->full_len) {
                        if (sock->type == SOCK_DGRAM) {
                                /* Truncated message */
                                msg->msg_flags |= MSG_TRUNC;
                                goto msg_finished;
                        }
                        stm->offset += copied;
                        stm->full_len -= copied;
                } else {
msg_finished:
                        /* Finished with message */
                        msg->msg_flags |= MSG_EOR;
                        KCM_STATS_INCR(kcm->stats.rx_msgs);
                }
        }

out:
        skb_free_datagram(sk, skb);
        return copied ? : err;
}

static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
                               struct pipe_inode_info *pipe, size_t len,
                               unsigned int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct strp_msg *stm;
        int err = 0;
        ssize_t copied;
        struct sk_buff *skb;

        /* Only support splice for SOCK_SEQPACKET */

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                goto err_out;

        /* Okay, have a message on the receive queue */

        stm = strp_msg(skb);

        if (len > stm->full_len)
                len = stm->full_len;

        copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
        if (copied < 0) {
                err = copied;
                goto err_out;
        }

        KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

        stm->offset += copied;
        stm->full_len -= copied;

        /* We have no way to return MSG_EOR. If all the bytes have been
         * read we still leave the message in the receive socket buffer.
         * A subsequent recvmsg needs to be done to return MSG_EOR and
         * finish reading the message.
         */

        skb_free_datagram(sk, skb);
        return copied;

err_out:
        skb_free_datagram(sk, skb);
        return err;
}

/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        if (kcm->rx_disabled)
                return;

        spin_lock_bh(&mux->rx_lock);

        kcm->rx_disabled = 1;

        /* If a psock is reserved we'll do cleanup in unreserve */
        if (!kcm->rx_psock) {
                if (kcm->rx_wait) {
                        list_del(&kcm->wait_rx_list);
                        /* paired with lockless reads in kcm_rfree() */
                        WRITE_ONCE(kcm->rx_wait, false);
                }

                requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
        }

        spin_unlock_bh(&mux->rx_lock);
}

/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        if (!kcm->rx_disabled)
                return;

        spin_lock_bh(&mux->rx_lock);

        kcm->rx_disabled = 0;
        kcm_rcv_ready(kcm);

        spin_unlock_bh(&mux->rx_lock);
}

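/* Set a SOL_KCM socket option; KCM_RECV_DISABLE is the only option
 * currently defined.
 */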
static int kcm_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        int val, valbool;
        int err = 0;

        if (level != SOL_KCM)
                return -ENOPROTOOPT;

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EINVAL;

        valbool = val ? 1 : 0;

        switch (optname) {
        case KCM_RECV_DISABLE:
                lock_sock(&kcm->sk);
                if (valbool)
                        kcm_recv_disable(kcm);
                else
                        kcm_recv_enable(kcm);
                release_sock(&kcm->sk);
                break;
        default:
                err = -ENOPROTOOPT;
        }

        return err;
}

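/* Get a SOL_KCM socket option; KCM_RECV_DISABLE is the only option
 * currently defined.
 */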
static int kcm_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        int val, len;

        if (level != SOL_KCM)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;

        len = min_t(unsigned int, len, sizeof(int));
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case KCM_RECV_DISABLE:
                val = kcm->rx_disabled;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}

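/* Initialize a new KCM socket and add it to its mux, assigning the
 * lowest unused index among the mux's KCM sockets.
 */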
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
        struct kcm_sock *tkcm;
        struct list_head *head;
        int index = 0;

        /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
         * we set sk_state, otherwise epoll_wait always returns right away with
         * EPOLLHUP
         */
        kcm->sk.sk_state = TCP_ESTABLISHED;

        /* Add to mux's kcm sockets list */
        kcm->mux = mux;
        spin_lock_bh(&mux->lock);

        head = &mux->kcm_socks;
        list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
                if (tkcm->index != index)
                        break;
                head = &tkcm->kcm_sock_list;
                index++;
        }

        list_add(&kcm->kcm_sock_list, head);
        kcm->index = index;

        mux->kcm_socks_cnt++;
        spin_unlock_bh(&mux->lock);

        INIT_WORK(&kcm->tx_work, kcm_tx_work);

        spin_lock_bh(&mux->rx_lock);
        kcm_rcv_ready(kcm);
        spin_unlock_bh(&mux->rx_lock);
}

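/* Attach a lower (TCP) socket to the mux of a KCM socket: allocate a
 * psock, install the given BPF program as the message parser, and take
 * over the lower socket's callbacks.
 */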
static int kcm_attach(struct socket *sock, struct socket *csock,
                      struct bpf_prog *prog)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        struct kcm_mux *mux = kcm->mux;
        struct sock *csk;
        struct kcm_psock *psock = NULL, *tpsock;
        struct list_head *head;
        int index = 0;
        static const struct strp_callbacks cb = {
                .rcv_msg = kcm_rcv_strparser,
                .parse_msg = kcm_parse_func_strparser,
                .read_sock_done = kcm_read_sock_done,
        };
        int err = 0;

        csk = csock->sk;
        if (!csk)
                return -EINVAL;

        lock_sock(csk);

        /* Only allow TCP sockets to be attached for now */
        if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
            csk->sk_protocol != IPPROTO_TCP) {
                err = -EOPNOTSUPP;
                goto out;
        }

        /* Don't allow listeners or closed sockets */
        if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
                err = -EOPNOTSUPP;
                goto out;
        }

        psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
        if (!psock) {
                err = -ENOMEM;
                goto out;
        }

        psock->mux = mux;
        psock->sk = csk;
        psock->bpf_prog = prog;

        write_lock_bh(&csk->sk_callback_lock);

        /* Check if sk_user_data is already in use by KCM or someone else.
         * Must be done under lock to prevent race conditions.
         */
        if (csk->sk_user_data) {
                write_unlock_bh(&csk->sk_callback_lock);
                kmem_cache_free(kcm_psockp, psock);
                err = -EALREADY;
                goto out;
        }

        err = strp_init(&psock->strp, csk, &cb);
        if (err) {
                write_unlock_bh(&csk->sk_callback_lock);
                kmem_cache_free(kcm_psockp, psock);
                goto out;
        }

        psock->save_data_ready = csk->sk_data_ready;
        psock->save_write_space = csk->sk_write_space;
        psock->save_state_change = csk->sk_state_change;
        csk->sk_user_data = psock;
        csk->sk_data_ready = psock_data_ready;
        csk->sk_write_space = psock_write_space;
        csk->sk_state_change = psock_state_change;

        write_unlock_bh(&csk->sk_callback_lock);

        sock_hold(csk);

        /* Finished initialization, now add the psock to the MUX. */
        spin_lock_bh(&mux->lock);
        head = &mux->psocks;
        list_for_each_entry(tpsock, &mux->psocks, psock_list) {
                if (tpsock->index != index)
                        break;
                head = &tpsock->psock_list;
                index++;
        }

        list_add(&psock->psock_list, head);
        psock->index = index;

        KCM_STATS_INCR(mux->stats.psock_attach);
        mux->psocks_cnt++;
        psock_now_avail(psock);
        spin_unlock_bh(&mux->lock);

        /* Schedule RX work in case there are already bytes queued */
        strp_check_rcv(&psock->strp);

out:
        release_sock(csk);

        return err;
}

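/* Handle SIOCKCMATTACH: resolve the socket and BPF program file
 * descriptors from the attach info and attach the lower socket.
 */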
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
        struct socket *csock;
        struct bpf_prog *prog;
        int err;

        csock = sockfd_lookup(info->fd, &err);
        if (!csock)
                return -ENOENT;

        prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
        if (IS_ERR(prog)) {
                err = PTR_ERR(prog);
                goto out;
        }

        err = kcm_attach(sock, csock, prog);
        if (err) {
                bpf_prog_put(prog);
                goto out;
        }

        /* Keep reference on file also */

        return 0;
out:
        fput(csock->file);
        return err;
}

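/* Detach a psock from its mux: restore the lower socket's callbacks,
 * stop the strparser, and free the psock. Freeing is deferred to the
 * tx paths if the psock is still reserved for transmit.
 */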
static void kcm_unattach(struct kcm_psock *psock)
{
        struct sock *csk = psock->sk;
        struct kcm_mux *mux = psock->mux;

        lock_sock(csk);

        /* Stop getting callbacks from TCP socket. After this there should
         * be no way to reserve a kcm for this psock.
         */
        write_lock_bh(&csk->sk_callback_lock);
        csk->sk_user_data = NULL;
        csk->sk_data_ready = psock->save_data_ready;
        csk->sk_write_space = psock->save_write_space;
        csk->sk_state_change = psock->save_state_change;
        strp_stop(&psock->strp);

        if (WARN_ON(psock->rx_kcm)) {
                write_unlock_bh(&csk->sk_callback_lock);
                release_sock(csk);
                return;
        }

        spin_lock_bh(&mux->rx_lock);

        /* Stop receiver activities. After this point psock should not be
         * able to get onto ready list either through callbacks or work.
         */
        if (psock->ready_rx_msg) {
                list_del(&psock->psock_ready_list);
                kfree_skb(psock->ready_rx_msg);
                psock->ready_rx_msg = NULL;
                KCM_STATS_INCR(mux->stats.rx_ready_drops);
        }

        spin_unlock_bh(&mux->rx_lock);

        write_unlock_bh(&csk->sk_callback_lock);

        /* Call strp_done without sock lock */
        release_sock(csk);
        strp_done(&psock->strp);
        lock_sock(csk);

        bpf_prog_put(psock->bpf_prog);

        spin_lock_bh(&mux->lock);

        aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
        save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

        KCM_STATS_INCR(mux->stats.psock_unattach);

        if (psock->tx_kcm) {
                /* psock was reserved. Just mark it finished and we will clean
                 * up in the kcm paths; we need the kcm lock, which cannot be
                 * acquired here.
                 */
                KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
                spin_unlock_bh(&mux->lock);

                /* We are unattaching a socket that is reserved. Abort the
                 * socket since we may be out of sync in sending on it. We need
                 * to do this without the mux lock.
                 */
                kcm_abort_tx_psock(psock, EPIPE, false);

                spin_lock_bh(&mux->lock);
                if (!psock->tx_kcm) {
                        /* psock was unreserved in the window while the mux
                         * was unlocked
                         */
                        goto no_reserved;
                }
                psock->done = 1;

                /* Commit done before queuing work to process it */
                smp_mb();

                /* Queue tx work to make sure psock->done is handled */
                queue_work(kcm_wq, &psock->tx_kcm->tx_work);
                spin_unlock_bh(&mux->lock);
        } else {
no_reserved:
                if (!psock->tx_stopped)
                        list_del(&psock->psock_avail_list);
                list_del(&psock->psock_list);
                mux->psocks_cnt--;
                spin_unlock_bh(&mux->lock);

                sock_put(csk);
                fput(csk->sk_socket->file);
                kmem_cache_free(kcm_psockp, psock);
        }

        release_sock(csk);
}

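/* Handle SIOCKCMUNATTACH: find the psock for the given lower socket on
 * this mux and unattach it.
 */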
1564 static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
1565 {
1566         struct kcm_sock *kcm = kcm_sk(sock->sk);
1567         struct kcm_mux *mux = kcm->mux;
1568         struct kcm_psock *psock;
1569         struct socket *csock;
1570         struct sock *csk;
1571         int err;
1572
1573         csock = sockfd_lookup(info->fd, &err);
1574         if (!csock)
1575                 return -ENOENT;
1576
1577         csk = csock->sk;
1578         if (!csk) {
1579                 err = -EINVAL;
1580                 goto out;
1581         }
1582
1583         err = -ENOENT;
1584
1585         spin_lock_bh(&mux->lock);
1586
1587         list_for_each_entry(psock, &mux->psocks, psock_list) {
1588                 if (psock->sk != csk)
1589                         continue;
1590
1591                 /* Found the matching psock */
1592
1593                 if (psock->unattaching || WARN_ON(psock->done)) {
1594                         err = -EALREADY;
1595                         break;
1596                 }
1597
1598                 psock->unattaching = 1;
1599
1600                 spin_unlock_bh(&mux->lock);
1601
1602                 /* Lower socket lock should already be held */
1603                 kcm_unattach(psock);
1604
1605                 err = 0;
1606                 goto out;
1607         }
1608
1609         spin_unlock_bh(&mux->lock);
1610
1611 out:
1612         fput(csock->file);
1613         return err;
1614 }
1615
1616 static struct proto kcm_proto = {
1617         .name   = "KCM",
1618         .owner  = THIS_MODULE,
1619         .obj_size = sizeof(struct kcm_sock),
1620 };
1621
1622 /* Clone a kcm socket. */
1623 static struct file *kcm_clone(struct socket *osock)
1624 {
1625         struct socket *newsock;
1626         struct sock *newsk;
1627
1628         newsock = sock_alloc();
1629         if (!newsock)
1630                 return ERR_PTR(-ENFILE);
1631
1632         newsock->type = osock->type;
1633         newsock->ops = osock->ops;
1634
1635         __module_get(newsock->ops->owner);
1636
1637         newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1638                          &kcm_proto, false);
1639         if (!newsk) {
1640                 sock_release(newsock);
1641                 return ERR_PTR(-ENOMEM);
1642         }
1643         sock_init_data(newsock, newsk);
1644         init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
1645
1646         return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
1647 }
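
/* A cloned KCM socket shares the MUX of the original socket, giving
 * userspace another descriptor for sending and receiving messages over
 * the same set of attached lower sockets. Illustrative userspace usage
 * of the corresponding SIOCKCMCLONE ioctl (a sketch, not part of this
 * file; kcm_fd is hypothetical):
 *
 *	struct kcm_clone info = { 0 };
 *	int newkcmfd = -1;
 *
 *	if (ioctl(kcm_fd, SIOCKCMCLONE, &info) == 0)
 *		newkcmfd = info.fd;
 */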
1648
1649 static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1650 {
1651         int err;
1652
1653         switch (cmd) {
1654         case SIOCKCMATTACH: {
1655                 struct kcm_attach info;
1656
1657                 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1658                         return -EFAULT;
1659
1660                 err = kcm_attach_ioctl(sock, &info);
1661
1662                 break;
1663         }
1664         case SIOCKCMUNATTACH: {
1665                 struct kcm_unattach info;
1666
1667                 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
1668                         return -EFAULT;
1669
1670                 err = kcm_unattach_ioctl(sock, &info);
1671
1672                 break;
1673         }
1674         case SIOCKCMCLONE: {
1675                 struct kcm_clone info;
1676                 struct file *file;
1677
1678                 info.fd = get_unused_fd_flags(0);
1679                 if (unlikely(info.fd < 0))
1680                         return info.fd;
1681
1682                 file = kcm_clone(sock);
1683                 if (IS_ERR(file)) {
1684                         put_unused_fd(info.fd);
1685                         return PTR_ERR(file);
1686                 }
1687                 if (copy_to_user((void __user *)arg, &info,
1688                                  sizeof(info))) {
1689                         put_unused_fd(info.fd);
1690                         fput(file);
1691                         return -EFAULT;
1692                 }
1693                 fd_install(info.fd, file);
1694                 err = 0;
1695                 break;
1696         }
1697         default:
1698                 err = -ENOIOCTLCMD;
1699                 break;
1700         }
1701
1702         return err;
1703 }
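
/* Illustrative userspace usage of SIOCKCMATTACH (a sketch, not part of
 * this file): a connected lower socket is attached along with a BPF
 * program that parses message boundaries from the byte stream. The
 * kcm_fd, tcp_fd and bpf_prog_fd names are hypothetical:
 *
 *	struct kcm_attach info = {
 *		.fd = tcp_fd,
 *		.bpf_fd = bpf_prog_fd,
 *	};
 *
 *	if (ioctl(kcm_fd, SIOCKCMATTACH, &info) < 0)
 *		perror("SIOCKCMATTACH");
 */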
1704
1705 static void free_mux(struct rcu_head *rcu)
1706 {
1707         struct kcm_mux *mux = container_of(rcu,
1708             struct kcm_mux, rcu);
1709
1710         kmem_cache_free(kcm_muxp, mux);
1711 }
1712
1713 static void release_mux(struct kcm_mux *mux)
1714 {
1715         struct kcm_net *knet = mux->knet;
1716         struct kcm_psock *psock, *tmp_psock;
1717
1718         /* Release psocks */
1719         list_for_each_entry_safe(psock, tmp_psock,
1720                                  &mux->psocks, psock_list) {
1721                 if (!WARN_ON(psock->unattaching))
1722                         kcm_unattach(psock);
1723         }
1724
1725         if (WARN_ON(mux->psocks_cnt))
1726                 return;
1727
1728         __skb_queue_purge(&mux->rx_hold_queue);
1729
1730         mutex_lock(&knet->mutex);
1731         aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
1732         aggregate_psock_stats(&mux->aggregate_psock_stats,
1733                               &knet->aggregate_psock_stats);
1734         aggregate_strp_stats(&mux->aggregate_strp_stats,
1735                              &knet->aggregate_strp_stats);
1736         list_del_rcu(&mux->kcm_mux_list);
1737         knet->count--;
1738         mutex_unlock(&knet->mutex);
1739
1740         call_rcu(&mux->rcu, free_mux);
1741 }
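
/* Freeing the mux through call_rcu() above ensures that lockless
 * readers traversing knet->mux_list under rcu_read_lock(), such as the
 * procfs statistics code, never see a mux freed out from under them.
 * A reader follows the usual RCU pattern:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(mux, &knet->mux_list, kcm_mux_list)
 *		... read-only access to *mux ...
 *	rcu_read_unlock();
 */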
1742
1743 static void kcm_done(struct kcm_sock *kcm)
1744 {
1745         struct kcm_mux *mux = kcm->mux;
1746         struct sock *sk = &kcm->sk;
1747         int socks_cnt;
1748
1749         spin_lock_bh(&mux->rx_lock);
1750         if (kcm->rx_psock) {
1751                 /* Cleanup in unreserve_rx_kcm */
1752                 WARN_ON(kcm->done);
1753                 kcm->rx_disabled = 1;
1754                 kcm->done = 1;
1755                 spin_unlock_bh(&mux->rx_lock);
1756                 return;
1757         }
1758
1759         if (kcm->rx_wait) {
1760                 list_del(&kcm->wait_rx_list);
1761                 /* paired with lockless reads in kcm_rfree() */
1762                 WRITE_ONCE(kcm->rx_wait, false);
1763         }
1764         /* Move any pending receive messages to other kcm sockets */
1765         requeue_rx_msgs(mux, &sk->sk_receive_queue);
1766
1767         spin_unlock_bh(&mux->rx_lock);
1768
1769         if (WARN_ON(sk_rmem_alloc_get(sk)))
1770                 return;
1771
1772         /* Detach from MUX */
1773         spin_lock_bh(&mux->lock);
1774
1775         list_del(&kcm->kcm_sock_list);
1776         mux->kcm_socks_cnt--;
1777         socks_cnt = mux->kcm_socks_cnt;
1778
1779         spin_unlock_bh(&mux->lock);
1780
1781         if (!socks_cnt) {
1782                 /* We are done with the mux now. */
1783                 release_mux(mux);
1784         }
1785
1786         WARN_ON(kcm->rx_wait);
1787
1788         sock_put(&kcm->sk);
1789 }
1790
1791 /* Close a KCM socket from the socket release path.
1792  * If this is the last KCM socket on the MUX, destroy the MUX.
1793  */
1794 static int kcm_release(struct socket *sock)
1795 {
1796         struct sock *sk = sock->sk;
1797         struct kcm_sock *kcm;
1798         struct kcm_mux *mux;
1799         struct kcm_psock *psock;
1800
1801         if (!sk)
1802                 return 0;
1803
1804         kcm = kcm_sk(sk);
1805         mux = kcm->mux;
1806
1807         lock_sock(sk);
1808         sock_orphan(sk);
1809         kfree_skb(kcm->seq_skb);
1810
1811         /* Purge the queue under the socket lock to avoid racing with
1812          * tx_work acting on a nonempty queue. If tx_work runs after this
1813          * point it will just return.
1814          */
1815         __skb_queue_purge(&sk->sk_write_queue);
1816
1817         /* Set tx_stopped. This is checked when psock is bound to a kcm and we
1818          * get a writespace callback. This prevents further work being queued
1819          * from the callback (unbinding the psock occurs after canceling work).
1820          */
1821         kcm->tx_stopped = 1;
1822
1823         release_sock(sk);
1824
1825         spin_lock_bh(&mux->lock);
1826         if (kcm->tx_wait) {
1827                 /* Take off tx_wait list; after this point there should be no way
1828                  * that a psock will be assigned to this kcm.
1829                  */
1830                 list_del(&kcm->wait_psock_list);
1831                 kcm->tx_wait = false;
1832         }
1833         spin_unlock_bh(&mux->lock);
1834
1835         /* Cancel work. After this point there should be no outside references
1836          * to the kcm socket.
1837          */
1838         cancel_work_sync(&kcm->tx_work);
1839
1840         lock_sock(sk);
1841         psock = kcm->tx_psock;
1842         if (psock) {
1843                 /* A psock was reserved, so we need to kill it since it
1844                  * may already have some bytes queued from a message. We
1845                  * need to do this after removing kcm from tx_wait list.
1846                  */
1847                 kcm_abort_tx_psock(psock, EPIPE, false);
1848                 unreserve_psock(kcm);
1849         }
1850         release_sock(sk);
1851
1852         WARN_ON(kcm->tx_wait);
1853         WARN_ON(kcm->tx_psock);
1854
1855         sock->sk = NULL;
1856
1857         kcm_done(kcm);
1858
1859         return 0;
1860 }
1861
1862 static const struct proto_ops kcm_dgram_ops = {
1863         .family =       PF_KCM,
1864         .owner =        THIS_MODULE,
1865         .release =      kcm_release,
1866         .bind =         sock_no_bind,
1867         .connect =      sock_no_connect,
1868         .socketpair =   sock_no_socketpair,
1869         .accept =       sock_no_accept,
1870         .getname =      sock_no_getname,
1871         .poll =         datagram_poll,
1872         .ioctl =        kcm_ioctl,
1873         .listen =       sock_no_listen,
1874         .shutdown =     sock_no_shutdown,
1875         .setsockopt =   kcm_setsockopt,
1876         .getsockopt =   kcm_getsockopt,
1877         .sendmsg =      kcm_sendmsg,
1878         .recvmsg =      kcm_recvmsg,
1879         .mmap =         sock_no_mmap,
1880         .sendpage =     kcm_sendpage,
1881 };
1882
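/* Identical to kcm_dgram_ops except that seqpacket sockets additionally
 * support splice_read.
 */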
1883 static const struct proto_ops kcm_seqpacket_ops = {
1884         .family =       PF_KCM,
1885         .owner =        THIS_MODULE,
1886         .release =      kcm_release,
1887         .bind =         sock_no_bind,
1888         .connect =      sock_no_connect,
1889         .socketpair =   sock_no_socketpair,
1890         .accept =       sock_no_accept,
1891         .getname =      sock_no_getname,
1892         .poll =         datagram_poll,
1893         .ioctl =        kcm_ioctl,
1894         .listen =       sock_no_listen,
1895         .shutdown =     sock_no_shutdown,
1896         .setsockopt =   kcm_setsockopt,
1897         .getsockopt =   kcm_getsockopt,
1898         .sendmsg =      kcm_sendmsg,
1899         .recvmsg =      kcm_recvmsg,
1900         .mmap =         sock_no_mmap,
1901         .sendpage =     kcm_sendpage,
1902         .splice_read =  kcm_splice_read,
1903 };
1904
1905 /* Create a KCM socket; a new MUX is allocated for each new socket */
1906 static int kcm_create(struct net *net, struct socket *sock,
1907                       int protocol, int kern)
1908 {
1909         struct kcm_net *knet = net_generic(net, kcm_net_id);
1910         struct sock *sk;
1911         struct kcm_mux *mux;
1912
1913         switch (sock->type) {
1914         case SOCK_DGRAM:
1915                 sock->ops = &kcm_dgram_ops;
1916                 break;
1917         case SOCK_SEQPACKET:
1918                 sock->ops = &kcm_seqpacket_ops;
1919                 break;
1920         default:
1921                 return -ESOCKTNOSUPPORT;
1922         }
1923
1924         if (protocol != KCMPROTO_CONNECTED)
1925                 return -EPROTONOSUPPORT;
1926
1927         sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
1928         if (!sk)
1929                 return -ENOMEM;
1930
1931         /* Allocate a kcm mux, shared between KCM sockets */
1932         mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
1933         if (!mux) {
1934                 sk_free(sk);
1935                 return -ENOMEM;
1936         }
1937
1938         spin_lock_init(&mux->lock);
1939         spin_lock_init(&mux->rx_lock);
1940         INIT_LIST_HEAD(&mux->kcm_socks);
1941         INIT_LIST_HEAD(&mux->kcm_rx_waiters);
1942         INIT_LIST_HEAD(&mux->kcm_tx_waiters);
1943
1944         INIT_LIST_HEAD(&mux->psocks);
1945         INIT_LIST_HEAD(&mux->psocks_ready);
1946         INIT_LIST_HEAD(&mux->psocks_avail);
1947
1948         mux->knet = knet;
1949
1950         /* Add new MUX to list */
1951         mutex_lock(&knet->mutex);
1952         list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
1953         knet->count++;
1954         mutex_unlock(&knet->mutex);
1955
1956         skb_queue_head_init(&mux->rx_hold_queue);
1957
1958         /* Init KCM socket */
1959         sock_init_data(sock, sk);
1960         init_kcm_sock(kcm_sk(sk), mux);
1961
1962         return 0;
1963 }
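
/* Illustrative userspace usage (a sketch, not part of this file): a new
 * KCM socket, and with it a new MUX, is created with socket(2):
 *
 *	int kcm_fd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
 *
 *	if (kcm_fd < 0)
 *		perror("socket AF_KCM");
 *
 * SOCK_DGRAM is also accepted; any other type is rejected with
 * ESOCKTNOSUPPORT, and any protocol other than KCMPROTO_CONNECTED with
 * EPROTONOSUPPORT, as kcm_create() above enforces.
 */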
1964
1965 static const struct net_proto_family kcm_family_ops = {
1966         .family = PF_KCM,
1967         .create = kcm_create,
1968         .owner  = THIS_MODULE,
1969 };
1970
1971 static __net_init int kcm_init_net(struct net *net)
1972 {
1973         struct kcm_net *knet = net_generic(net, kcm_net_id);
1974
1975         INIT_LIST_HEAD_RCU(&knet->mux_list);
1976         mutex_init(&knet->mutex);
1977
1978         return 0;
1979 }
1980
1981 static __net_exit void kcm_exit_net(struct net *net)
1982 {
1983         struct kcm_net *knet = net_generic(net, kcm_net_id);
1984
1985         /* All KCM sockets should be closed at this point, which should mean
1986          * that all multiplexors and psocks have been destroyed.
1987          */
1988         WARN_ON(!list_empty(&knet->mux_list));
1989
1990         mutex_destroy(&knet->mutex);
1991 }
1992
1993 static struct pernet_operations kcm_net_ops = {
1994         .init = kcm_init_net,
1995         .exit = kcm_exit_net,
1996         .id   = &kcm_net_id,
1997         .size = sizeof(struct kcm_net),
1998 };
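
/* kcm_net_ops is registered with register_pernet_device() in kcm_init()
 * below, so kcm_init_net() and kcm_exit_net() run once per network
 * namespace, each namespace getting its own struct kcm_net via
 * net_generic().
 */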
1999
2000 static int __init kcm_init(void)
2001 {
2002         int err = -ENOMEM;
2003
2004         kcm_muxp = kmem_cache_create("kcm_mux_cache",
2005                                      sizeof(struct kcm_mux), 0,
2006                                      SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2007         if (!kcm_muxp)
2008                 goto fail;
2009
2010         kcm_psockp = kmem_cache_create("kcm_psock_cache",
2011                                        sizeof(struct kcm_psock), 0,
2012                                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2013         if (!kcm_psockp)
2014                 goto fail;
2015
2016         kcm_wq = create_singlethread_workqueue("kkcmd");
2017         if (!kcm_wq)
2018                 goto fail;
2019
2020         err = proto_register(&kcm_proto, 1);
2021         if (err)
2022                 goto fail;
2023
2024         err = register_pernet_device(&kcm_net_ops);
2025         if (err)
2026                 goto net_ops_fail;
2027
2028         err = sock_register(&kcm_family_ops);
2029         if (err)
2030                 goto sock_register_fail;
2031
2032         err = kcm_proc_init();
2033         if (err)
2034                 goto proc_init_fail;
2035
2036         return 0;
2037
2038 proc_init_fail:
2039         sock_unregister(PF_KCM);
2040
2041 sock_register_fail:
2042         unregister_pernet_device(&kcm_net_ops);
2043
2044 net_ops_fail:
2045         proto_unregister(&kcm_proto);
2046
2047 fail:
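	/* kmem_cache_destroy() safely ignores a NULL cache pointer, and the
	 * workqueue is only destroyed if it was created, so a partially
	 * completed initialization unwinds correctly from any failure point.
	 */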
2048         kmem_cache_destroy(kcm_muxp);
2049         kmem_cache_destroy(kcm_psockp);
2050
2051         if (kcm_wq)
2052                 destroy_workqueue(kcm_wq);
2053
2054         return err;
2055 }
2056
2057 static void __exit kcm_exit(void)
2058 {
2059         kcm_proc_exit();
2060         sock_unregister(PF_KCM);
2061         unregister_pernet_device(&kcm_net_ops);
2062         proto_unregister(&kcm_proto);
2063         destroy_workqueue(kcm_wq);
2064
2065         kmem_cache_destroy(kcm_muxp);
2066         kmem_cache_destroy(kcm_psockp);
2067 }
2068
2069 module_init(kcm_init);
2070 module_exit(kcm_exit);
2071
2072 MODULE_LICENSE("GPL");
2073 MODULE_ALIAS_NETPROTO(PF_KCM);