/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>
unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;
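/* Overview (drawn from the structures used below): a KCM mux fans
 * messages between a set of KCM sockets (one per socket() or
 * SIOCKCMCLONE) and a set of attached psocks, each wrapping a connected
 * TCP socket whose byte stream is delimited into messages by strparser.
 * The kmem caches back those two object types, and kcm_wq runs the
 * deferred TX work they schedule.
 */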
static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
	return (struct kcm_sock *)sk;
}
static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
	return (struct kcm_tx_msg *)skb->cb;
}
static void report_csk_error(struct sock *csk, int err)
{
	csk->sk_err = EPIPE;
	csk->sk_error_report(csk);
}
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}
/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	STRP_STATS_ADD(mux->stats.rx_bytes,
		       psock->strp.stats.rx_bytes -
		       psock->saved_rx_bytes);
	mux->stats.rx_msgs +=
		psock->strp.stats.rx_msgs - psock->saved_rx_msgs;
	psock->saved_rx_msgs = psock->strp.stats.rx_msgs;
	psock->saved_rx_bytes = psock->strp.stats.rx_bytes;
}
static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	KCM_STATS_ADD(mux->stats.tx_bytes,
		      psock->stats.tx_bytes - psock->saved_tx_bytes);
	mux->stats.tx_msgs +=
		psock->stats.tx_msgs - psock->saved_tx_msgs;
	psock->saved_tx_msgs = psock->stats.tx_msgs;
	psock->saved_tx_bytes = psock->stats.tx_bytes;
}
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* KCM is ready to receive messages on its queue-- either the KCM is new or
 * has become unblocked after being blocked on full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct sk_buff *skb;

	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
		return;

	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Assuming buffer limit has been reached */
			skb_queue_head(&mux->rx_hold_queue, skb);
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}
	}

	while (!list_empty(&mux->psocks_ready)) {
		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
					 psock_ready_list);

		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
			/* Assuming buffer limit has been reached */
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}

		/* Consumed the ready message on the psock. Schedule rx_work to
		 * get more messages.
		 */
		list_del(&psock->psock_ready_list);
		psock->ready_rx_msg = NULL;

		/* Commit clearing of ready_rx_msg for queuing work */
		smp_mb();

		strp_unpause(&psock->strp);
		strp_check_rcv(&psock->strp);
	}

	/* Buffer limit is okay now, add to ready list */
	list_add_tail(&kcm->wait_rx_list,
		      &kcm->mux->kcm_rx_waiters);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, true);
}
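/* Destructor for skbs on a KCM receive queue: returns the receive
 * memory charged in kcm_queue_rcv_skb and, once the queue drains below
 * sk_rcvlowat, puts the KCM socket back on the receivers list.
 */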
static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct kcm_mux *mux = kcm->mux;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);

	/* For reading rx_wait and rx_psock without holding lock */
	smp_mb__after_atomic();

	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
		spin_lock_bh(&mux->rx_lock);
		kcm_rcv_ready(kcm);
		spin_unlock_bh(&mux->rx_lock);
	}
}
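/* Charge an skb to a KCM socket's receive queue; fails with -ENOMEM or
 * -ENOBUFS when receive buffer limits are hit so the caller can park
 * the message (on a psock ready list or the mux hold queue) instead.
 */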
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}
/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 *
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = __skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			goto try_again;
		}
	}
}
/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
				       struct sk_buff *head)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	WARN_ON(psock->ready_rx_msg);

	if (psock->rx_kcm)
		return psock->rx_kcm;

	spin_lock_bh(&mux->rx_lock);

	if (psock->rx_kcm) {
		spin_unlock_bh(&mux->rx_lock);
		return psock->rx_kcm;
	}

	kcm_update_rx_mux_stats(mux, psock);

	if (list_empty(&mux->kcm_rx_waiters)) {
		psock->ready_rx_msg = head;
		strp_pause(&psock->strp);
		list_add_tail(&psock->psock_ready_list,
			      &mux->psocks_ready);
		spin_unlock_bh(&mux->rx_lock);
		return NULL;
	}

	kcm = list_first_entry(&mux->kcm_rx_waiters,
			       struct kcm_sock, wait_rx_list);
	list_del(&kcm->wait_rx_list);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, false);

	psock->rx_kcm = kcm;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, psock);

	spin_unlock_bh(&mux->rx_lock);

	return kcm;
}
static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
	kcm_done(container_of(w, struct kcm_sock, done_work));
}
/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
			     bool rcv_ready)
{
	struct kcm_sock *kcm = psock->rx_kcm;
	struct kcm_mux *mux = psock->mux;

	if (!kcm)
		return;

	spin_lock_bh(&mux->rx_lock);

	psock->rx_kcm = NULL;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, NULL);

	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
	 * kcm_rfree
	 */
	smp_mb();

	if (unlikely(kcm->done)) {
		spin_unlock_bh(&mux->rx_lock);

		/* Need to run kcm_done in a task since we need to acquire
		 * callback locks which may already be held here.
		 */
		INIT_WORK(&kcm->done_work, kcm_done_work);
		schedule_work(&kcm->done_work);
		return;
	}

	if (unlikely(kcm->rx_disabled)) {
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
		/* Check for degenerative race with rx_wait that all
		 * data was dequeued (accounted for in kcm_rfree).
		 */
		kcm_rcv_ready(kcm);
	}

	spin_unlock_bh(&mux->rx_lock);
}
/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
	struct kcm_psock *psock;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (likely(psock))
		strp_data_ready(&psock->strp);

	read_unlock_bh(&sk->sk_callback_lock);
}
/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct kcm_sock *kcm;

try_queue:
	kcm = reserve_rx_kcm(psock, skb);
	if (!kcm) {
		/* Unable to reserve a KCM, message is held in psock and strp
		 * is paused.
		 */
		return;
	}

	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
		/* Should mean socket buffer full */
		unreserve_rx_kcm(psock, false);
		goto try_queue;
	}
}
static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct bpf_prog *prog = psock->bpf_prog;

	return (*prog->bpf_func)(skb, prog->insnsi);
}
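/* The program attached via SIOCKCMATTACH sees the skb starting at the
 * first unparsed byte and must return the total length of the next
 * message (0 to wait for more data, negative to abort). A minimal
 * userspace sketch, assuming messages framed by a two byte big-endian
 * length header counting only the payload, and the load_half() helper
 * used by socket-filter programs in the kernel's BPF samples:
 *
 *	int kcm_parser(struct __sk_buff *skb)
 *	{
 *		return load_half(skb, 0) + 2;
 *	}
 */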
static int kcm_read_sock_done(struct strparser *strp, int err)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

	unreserve_rx_kcm(psock, true);

	return err;
}
static void psock_state_change(struct sock *sk)
{
	/* TCP only does a POLLIN for a half close. Do a POLLHUP here
	 * since application will normally not poll with POLLIN
	 * on the TCP sockets.
	 */

	report_csk_error(sk, EPIPE);
}
static void psock_write_space(struct sock *sk)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux;
	struct kcm_sock *kcm;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (unlikely(!psock))
		goto out;
	mux = psock->mux;

	spin_lock_bh(&mux->lock);

	/* Check if the socket is reserved so someone is waiting for sending. */
	kcm = psock->tx_kcm;
	if (kcm && !unlikely(kcm->tx_stopped))
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
static void unreserve_psock(struct kcm_sock *kcm);

/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if a psock was reserved for this
	 * kcm via unreserve_psock.
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}
/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	if (list_empty(&mux->kcm_tx_waiters)) {
		list_add_tail(&psock->psock_avail_list,
			      &mux->psocks_avail);
	} else {
		kcm = list_first_entry(&mux->kcm_tx_waiters,
				       struct kcm_sock,
				       wait_psock_list);
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
		psock->tx_kcm = kcm;

		/* Commit before changing tx_psock since that is read in
		 * reserve_psock before queuing work.
		 */
		smp_mb();

		kcm->tx_psock = psock;
		KCM_STATS_INCR(psock->stats.reserved);
		queue_work(kcm_wq, &kcm->tx_work);
	}
}
/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);

	psock = kcm->tx_psock;

	if (WARN_ON(!psock)) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	smp_rmb(); /* Read tx_psock before tx_wait */

	kcm_update_tx_mux_stats(mux, psock);

	WARN_ON(kcm->tx_wait);

	kcm->tx_psock = NULL;
	psock->tx_kcm = NULL;
	KCM_STATS_INCR(psock->stats.unreserved);

	if (unlikely(psock->tx_stopped)) {
		if (psock->done) {
			/* Deferred free */
			list_del(&psock->psock_list);
			mux->psocks_cnt--;
			sock_put(psock->sk);
			fput(psock->sk->sk_socket->file);
			kmem_cache_free(kcm_psockp, psock);
		}

		/* Don't put back on available list */

		spin_unlock_bh(&mux->lock);

		return;
	}

	psock_now_avail(psock);

	spin_unlock_bh(&mux->lock);
}
static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);
	KCM_STATS_INCR(mux->stats.tx_retries);
	spin_unlock_bh(&mux->lock);
}
/* Write any messages ready on the kcm socket. Called with kcm sock lock
 * held. Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
	struct sock *sk = &kcm->sk;
	struct kcm_psock *psock;
	struct sk_buff *skb, *head;
	struct kcm_tx_msg *txm;
	unsigned short fragidx, frag_offset;
	unsigned int sent, total_sent = 0;
	int ret = 0;

	kcm->tx_wait_more = false;
	psock = kcm->tx_psock;
	if (unlikely(psock && psock->tx_stopped)) {
		/* A reserved psock was aborted asynchronously. Unreserve
		 * it and we'll retry the message.
		 */
		unreserve_psock(kcm);
		kcm_report_tx_retry(kcm);
		if (skb_queue_empty(&sk->sk_write_queue))
			return 0;

		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

	} else if (skb_queue_empty(&sk->sk_write_queue)) {
		return 0;
	}

	head = skb_peek(&sk->sk_write_queue);
	txm = kcm_tx_msg(head);

	if (txm->sent) {
		/* Send of first skbuff in queue already in progress */
		if (WARN_ON(!psock)) {
			ret = -EINVAL;
			goto out;
		}
		sent = txm->sent;
		frag_offset = txm->frag_offset;
		fragidx = txm->fragidx;
		skb = txm->frag_skb;

		goto do_frag;
	}

try_again:
	psock = reserve_psock(kcm);
	if (!psock)
		goto out;

	do {
		skb = head;
		txm = kcm_tx_msg(head);
		sent = 0;

do_frag_list:
		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
			ret = -EINVAL;
			goto out;
		}

		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
		     fragidx++) {
			skb_frag_t *frag;

			frag_offset = 0;
do_frag:
			frag = &skb_shinfo(skb)->frags[fragidx];
			if (WARN_ON(!frag->size)) {
				ret = -EINVAL;
				goto out;
			}

			ret = kernel_sendpage(psock->sk->sk_socket,
					      frag->page.p,
					      frag->page_offset + frag_offset,
					      frag->size - frag_offset,
					      MSG_DONTWAIT);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					/* Save state to try again when there's
					 * write space on the socket
					 */
					txm->sent = sent;
					txm->frag_offset = frag_offset;
					txm->fragidx = fragidx;
					txm->frag_skb = skb;

					ret = 0;
					goto out;
				}

				/* Hard failure in sending message, abort this
				 * psock since it has lost framing
				 * synchronization and retry sending the
				 * message from the beginning.
				 */
				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
						   true);
				unreserve_psock(kcm);

				txm->sent = 0;
				kcm_report_tx_retry(kcm);
				ret = 0;

				goto try_again;
			}

			sent += ret;
			frag_offset += ret;
			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
			if (frag_offset < frag->size) {
				/* Not finished with this frag */
				goto do_frag;
			}
		}

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				skb = skb_shinfo(skb)->frag_list;
				goto do_frag_list;
			}
		} else if (skb->next) {
			skb = skb->next;
			goto do_frag_list;
		}

		/* Successfully sent the whole packet, account for it. */
		skb_dequeue(&sk->sk_write_queue);
		kfree_skb(head);
		sk->sk_wmem_queued -= sent;
		total_sent += sent;
		KCM_STATS_INCR(psock->stats.tx_msgs);
	} while ((head = skb_peek(&sk->sk_write_queue)));
out:
	if (!head) {
		/* Done with all queued messages. */
		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
		unreserve_psock(kcm);
	}

	/* Check if write space is available */
	sk->sk_write_space(sk);

	return total_sent ? : ret;
}
static void kcm_tx_work(struct work_struct *w)
{
	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
	struct sock *sk = &kcm->sk;
	int err;

	lock_sock(sk);

	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
	 * aborts
	 */
	err = kcm_write_msgs(kcm);
	if (err < 0) {
		/* Hard failure in write, report error on KCM socket */
		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
		report_csk_error(&kcm->sk, -err);
		goto out;
	}

	/* Primarily for SOCK_SEQPACKET sockets */
	if (likely(sk->sk_socket) &&
	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_space(sk);
	}

out:
	release_sock(sk);
}
static void kcm_push(struct kcm_sock *kcm)
{
	if (kcm->tx_wait_more)
		kcm_write_msgs(kcm);
}
static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	int err = 0;
	int i;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & MSG_MORE);

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	err = -EPIPE;
	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		i = skb_shinfo(skb)->nr_frags;

		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
			goto coalesced;
		}

		if (i >= MAX_SKB_FRAGS) {
			struct sk_buff *tskb;

			tskb = alloc_skb(0, sk->sk_allocation);
			while (!tskb) {
				kcm_push(kcm);
				err = sk_stream_wait_memory(sk, &timeo);
				if (err)
					goto out_error;

				tskb = alloc_skb(0, sk->sk_allocation);
			}

			if (head == skb)
				skb_shinfo(head)->frag_list = tskb;
			else
				skb->next = tskb;

			skb = tskb;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			i = 0;
		}
	} else {
		/* Call the sk_stream functions to manage the sndbuf mem. */
		if (!sk_stream_memory_free(sk)) {
			kcm_push(kcm);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;
		}

		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;
		i = 0;
	}

	get_page(page);
	skb_fill_page_desc(skb, i, page, offset, size);
	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

coalesced:
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	sk->sk_wmem_queued += size;
	sk_mem_charge(sk, size);

	if (head != skb) {
		head->len += size;
		head->data_len += size;
		head->truesize += size;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		/* Message complete, queue it on send buffer */
		__skb_queue_tail(&sk->sk_write_queue, head);
		kcm->seq_skb = NULL;
		KCM_STATS_INCR(kcm->stats.tx_msgs);

		if (flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
		kcm->seq_skb = head;
		kcm_tx_msg(head)->last_skb = skb;
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, size);

	release_sock(sk);
	return size;

out_error:
	kcm_push(kcm);

	err = sk_stream_error(sk, flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	size_t copy, copied = 0;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	int eor = (sock->type == SOCK_DGRAM) ?
		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
	int err = -EPIPE;

	lock_sock(sk);

	/* Per tcp_sendmsg this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		goto start;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	if (!sk_stream_memory_free(sk)) {
		kcm_push(kcm);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (msg_data_left(msg)) {
		/* New message, alloc head skb */
		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;

		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

start:
	while (msg_data_left(msg)) {
		bool merge = true;
		int i = skb_shinfo(skb)->nr_frags;
		struct page_frag *pfrag = sk_page_frag(sk);

		if (!sk_page_frag_refill(sk, pfrag))
			goto wait_for_memory;

		if (!skb_can_coalesce(skb, i, pfrag->page,
				      pfrag->offset)) {
			if (i == MAX_SKB_FRAGS) {
				struct sk_buff *tskb;

				tskb = alloc_skb(0, sk->sk_allocation);
				if (!tskb)
					goto wait_for_memory;

				if (head == skb)
					skb_shinfo(head)->frag_list = tskb;
				else
					skb->next = tskb;

				skb = tskb;
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				continue;
			}
			merge = false;
		}

		copy = min_t(int, msg_data_left(msg),
			     pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
					       pfrag->page,
					       pfrag->offset,
					       copy);
		if (err)
			goto out_error;

		/* Update the skb. */
		if (merge) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			skb_fill_page_desc(skb, i, pfrag->page,
					   pfrag->offset, copy);
			get_page(pfrag->page);
		}

		pfrag->offset += copy;
		copied += copy;
		if (head != skb) {
			head->len += copy;
			head->data_len += copy;
		}

		continue;

wait_for_memory:
		kcm_push(kcm);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		if (head) {
			/* Message complete, queue it on send buffer */
			__skb_queue_tail(&sk->sk_write_queue, head);
			kcm->seq_skb = NULL;
			KCM_STATS_INCR(kcm->stats.tx_msgs);
		}

		if (msg->msg_flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
partial_message:
		if (head) {
			kcm->seq_skb = head;
			kcm_tx_msg(head)->last_skb = skb;
		}
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

	release_sock(sk);
	return copied;

out_error:
	kcm_push(kcm);

	if (copied && sock->type == SOCK_SEQPACKET) {
		/* Wrote some bytes before encountering an
		 * error, return partial success.
		 */
		goto partial_message;
	}

	if (head != kcm->seq_skb)
		kfree_skb(head);

	err = sk_stream_error(sk, msg->msg_flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
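/* Usage sketch (userspace, illustrative only): on a SOCK_SEQPACKET KCM
 * socket each send() without MSG_MORE queues one complete message;
 * MSG_BATCH defers transmission until a later send without the flag,
 * amortizing kcm_write_msgs over several messages:
 *
 *	send(kcmfd, msg1, len1, MSG_BATCH);
 *	send(kcmfd, msg2, len2, MSG_BATCH);
 *	send(kcmfd, msg3, len3, 0);	// flushes all three
 */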
static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		sk_wait_data(sk, &timeo, NULL);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
		       size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	int err = 0;
	long timeo;
	struct strp_rx_msg *rxm;
	int copied = 0;
	struct sk_buff *skb;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	skb = kcm_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto out;

	/* Okay, have a message on the receive queue */

	rxm = strp_rx_msg(skb);

	if (len > rxm->full_len)
		len = rxm->full_len;

	err = skb_copy_datagram_msg(skb, rxm->offset, msg, len);
	if (err < 0)
		goto out;

	copied = len;
	if (likely(!(flags & MSG_PEEK))) {
		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
		if (copied < rxm->full_len) {
			if (sock->type == SOCK_DGRAM) {
				/* Truncated message */
				msg->msg_flags |= MSG_TRUNC;
				goto msg_finished;
			}
			rxm->offset += copied;
			rxm->full_len -= copied;
		} else {
msg_finished:
			/* Finished with message */
			msg->msg_flags |= MSG_EOR;
			KCM_STATS_INCR(kcm->stats.rx_msgs);
			skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);
		}
	}

out:
	release_sock(sk);

	return copied ? : err;
}
static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	long timeo;
	struct strp_rx_msg *rxm;
	int err = 0;
	ssize_t copied;
	struct sk_buff *skb;

	/* Only support splice for SOCKSEQPACKET */

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	skb = kcm_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto err_out;

	/* Okay, have a message on the receive queue */

	rxm = strp_rx_msg(skb);

	if (len > rxm->full_len)
		len = rxm->full_len;

	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags);
	if (copied < 0) {
		err = copied;
		goto err_out;
	}

	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

	rxm->offset += copied;
	rxm->full_len -= copied;

	/* We have no way to return MSG_EOR. If all the bytes have been
	 * read we still leave the message in the receive socket buffer.
	 * A subsequent recvmsg needs to be done to return MSG_EOR and
	 * finish reading the message.
	 */

	release_sock(sk);

	return copied;

err_out:
	release_sock(sk);

	return err;
}
/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 1;

	/* If a psock is reserved we'll do cleanup in unreserve */
	if (!kcm->rx_psock) {
		if (kcm->rx_wait) {
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);
		}

		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	}

	spin_unlock_bh(&mux->rx_lock);
}
/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (!kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 0;
	kcm_rcv_ready(kcm);

	spin_unlock_bh(&mux->rx_lock);
}
static int kcm_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, valbool;
	int err = 0;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case KCM_RECV_DISABLE:
		lock_sock(&kcm->sk);
		if (valbool)
			kcm_recv_disable(kcm);
		else
			kcm_recv_enable(kcm);
		release_sock(&kcm->sk);
		break;
	default:
		err = -ENOPROTOOPT;
	}

	return err;
}
static int kcm_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, len;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case KCM_RECV_DISABLE:
		val = kcm->rx_disabled;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
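/* Usage sketch (userspace, illustrative only): pausing delivery to one
 * KCM socket so other sockets on the mux pick up new messages:
 *
 *	int one = 1;
 *	setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &one, sizeof(one));
 */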
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
	struct kcm_sock *tkcm;
	struct list_head *head;
	int index = 0;

	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
	 * we set sk_state, otherwise epoll_wait always returns right away with
	 * POLLHUP
	 */
	kcm->sk.sk_state = TCP_ESTABLISHED;

	/* Add to mux's kcm sockets list */
	kcm->mux = mux;
	spin_lock_bh(&mux->lock);

	head = &mux->kcm_socks;
	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
		if (tkcm->index != index)
			break;
		head = &tkcm->kcm_sock_list;
		index++;
	}

	list_add(&kcm->kcm_sock_list, head);
	kcm->index = index;

	mux->kcm_socks_cnt++;
	spin_unlock_bh(&mux->lock);

	INIT_WORK(&kcm->tx_work, kcm_tx_work);

	spin_lock_bh(&mux->rx_lock);
	kcm_rcv_ready(kcm);
	spin_unlock_bh(&mux->rx_lock);
}
static int kcm_attach(struct socket *sock, struct socket *csock,
		      struct bpf_prog *prog)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct sock *csk;
	struct kcm_psock *psock = NULL, *tpsock;
	struct list_head *head;
	int index = 0;
	struct strp_callbacks cb;
	int err = 0;

	csk = csock->sk;
	if (!csk)
		return -EINVAL;

	lock_sock(csk);

	/* Only allow TCP sockets to be attached for now */
	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
	    csk->sk_protocol != IPPROTO_TCP) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Don't allow listeners or closed sockets */
	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
		err = -EOPNOTSUPP;
		goto out;
	}

	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
	if (!psock) {
		err = -ENOMEM;
		goto out;
	}

	psock->mux = mux;
	psock->sk = csk;
	psock->bpf_prog = prog;

	cb.rcv_msg = kcm_rcv_strparser;
	cb.abort_parser = NULL;
	cb.parse_msg = kcm_parse_func_strparser;
	cb.read_sock_done = kcm_read_sock_done;

	err = strp_init(&psock->strp, csk, &cb);
	if (err) {
		kmem_cache_free(kcm_psockp, psock);
		goto out;
	}

	write_lock_bh(&csk->sk_callback_lock);

	/* Check if sk_user_data is already in use by KCM or someone else.
	 * Must be done under lock to prevent race conditions.
	 */
	if (csk->sk_user_data) {
		write_unlock_bh(&csk->sk_callback_lock);
		strp_stop(&psock->strp);
		strp_done(&psock->strp);
		kmem_cache_free(kcm_psockp, psock);
		err = -EALREADY;
		goto out;
	}

	psock->save_data_ready = csk->sk_data_ready;
	psock->save_write_space = csk->sk_write_space;
	psock->save_state_change = csk->sk_state_change;
	csk->sk_user_data = psock;
	csk->sk_data_ready = psock_data_ready;
	csk->sk_write_space = psock_write_space;
	csk->sk_state_change = psock_state_change;

	write_unlock_bh(&csk->sk_callback_lock);

	sock_hold(csk);

	/* Finished initialization, now add the psock to the MUX. */
	spin_lock_bh(&mux->lock);
	head = &mux->psocks;
	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
		if (tpsock->index != index)
			break;
		head = &tpsock->psock_list;
		index++;
	}

	list_add(&psock->psock_list, head);
	psock->index = index;

	KCM_STATS_INCR(mux->stats.psock_attach);
	mux->psocks_cnt++;
	psock_now_avail(psock);
	spin_unlock_bh(&mux->lock);

	/* Schedule RX work in case there are already bytes queued */
	strp_check_rcv(&psock->strp);

out:
	release_sock(csk);

	return err;
}
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
	struct socket *csock;
	struct bpf_prog *prog;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out;
	}

	err = kcm_attach(sock, csock, prog);
	if (err) {
		bpf_prog_put(prog);
		goto out;
	}

	/* Keep reference on file also */

	return 0;
out:
	fput(csock->file);
	return err;
}
static void kcm_unattach(struct kcm_psock *psock)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	lock_sock(csk);

	/* Stop getting callbacks from TCP socket. After this there should
	 * be no way to reserve a kcm for this psock.
	 */
	write_lock_bh(&csk->sk_callback_lock);
	csk->sk_user_data = NULL;
	csk->sk_data_ready = psock->save_data_ready;
	csk->sk_write_space = psock->save_write_space;
	csk->sk_state_change = psock->save_state_change;
	strp_stop(&psock->strp);

	if (WARN_ON(psock->rx_kcm)) {
		write_unlock_bh(&csk->sk_callback_lock);
		release_sock(csk);
		return;
	}

	spin_lock_bh(&mux->rx_lock);

	/* Stop receiver activities. After this point psock should not be
	 * able to get onto ready list either through callbacks or work.
	 */
	if (psock->ready_rx_msg) {
		list_del(&psock->psock_ready_list);
		kfree_skb(psock->ready_rx_msg);
		psock->ready_rx_msg = NULL;
		KCM_STATS_INCR(mux->stats.rx_ready_drops);
	}

	spin_unlock_bh(&mux->rx_lock);

	write_unlock_bh(&csk->sk_callback_lock);

	/* Call strp_done without sock lock */
	release_sock(csk);
	strp_done(&psock->strp);
	lock_sock(csk);

	bpf_prog_put(psock->bpf_prog);

	spin_lock_bh(&mux->lock);

	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

	KCM_STATS_INCR(mux->stats.psock_unattach);

	if (psock->tx_kcm) {
		/* psock was reserved. Just mark it finished and we will clean
		 * up in the kcm paths, we need kcm lock which can not be
		 * acquired here.
		 */
		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
		spin_unlock_bh(&mux->lock);

		/* We are unattaching a socket that is reserved. Abort the
		 * socket since we may be out of sync in sending on it. We need
		 * to do this without the mux lock.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);

		spin_lock_bh(&mux->lock);
		if (!psock->tx_kcm) {
			/* psock now unreserved in window mux was unlocked */
			goto no_reserved;
		}
		psock->done = 1;

		/* Commit done before queuing work to process it */
		smp_mb();

		/* Queue tx work to make sure psock->done is handled */
		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
		spin_unlock_bh(&mux->lock);
	} else {
no_reserved:
		if (!psock->tx_stopped)
			list_del(&psock->psock_avail_list);
		list_del(&psock->psock_list);
		mux->psocks_cnt--;
		spin_unlock_bh(&mux->lock);

		sock_put(csk);
		fput(csk->sk_socket->file);
		kmem_cache_free(kcm_psockp, psock);
	}

	release_sock(csk);
}
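/* Find the psock wrapping the TCP socket given by info->fd and detach
 * it from the mux. The unattaching flag closes the race with a second
 * SIOCKCMUNATTACH on the same fd once the mux lock is dropped for
 * kcm_unattach.
 */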
static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct socket *csock;
	struct sock *csk;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	csk = csock->sk;
	if (!csk) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOENT;

	spin_lock_bh(&mux->lock);

	list_for_each_entry(psock, &mux->psocks, psock_list) {
		if (psock->sk != csk)
			continue;

		/* Found the matching psock */

		if (psock->unattaching || WARN_ON(psock->done)) {
			err = -EALREADY;
			break;
		}

		psock->unattaching = 1;

		spin_unlock_bh(&mux->lock);

		/* Lower socket lock should already be held */
		kcm_unattach(psock);

		err = 0;
		goto out;
	}

	spin_unlock_bh(&mux->lock);

out:
	fput(csock->file);
	return err;
}
static struct proto kcm_proto = {
	.name = "KCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct kcm_sock),
};
/* Clone a kcm socket. */
static struct file *kcm_clone(struct socket *osock)
{
	struct socket *newsock;
	struct sock *newsk;
	struct file *file;

	newsock = sock_alloc();
	if (!newsock)
		return ERR_PTR(-ENFILE);

	newsock->type = osock->type;
	newsock->ops = osock->ops;

	__module_get(newsock->ops->owner);

	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
			 &kcm_proto, false);
	if (!newsk) {
		sock_release(newsock);
		return ERR_PTR(-ENOMEM);
	}
	sock_init_data(newsock, newsk);
	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);

	file = sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
	if (IS_ERR(file))
		sock_release(newsock);

	return file;
}
static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err;

	switch (cmd) {
	case SIOCKCMATTACH: {
		struct kcm_attach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_attach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMUNATTACH: {
		struct kcm_unattach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_unattach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMCLONE: {
		struct kcm_clone info;
		struct file *file;

		info.fd = get_unused_fd_flags(0);
		if (unlikely(info.fd < 0))
			return -ENFILE;

		file = kcm_clone(sock);
		if (IS_ERR(file)) {
			put_unused_fd(info.fd);
			return PTR_ERR(file);
		}
		if (copy_to_user((void __user *)arg, &info,
				 sizeof(info))) {
			put_unused_fd(info.fd);
			fput(file);
			return -EFAULT;
		}
		fd_install(info.fd, file);
		err = 0;
		break;
	}
	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
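/* Usage sketch (userspace, illustrative only; assumes kcmfd is a KCM
 * socket, tcpfd a connected TCP socket, and bpf_fd a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program returning message lengths):
 *
 *	struct kcm_attach attach = {
 *		.fd = tcpfd,
 *		.bpf_fd = bpf_fd,
 *	};
 *	ioctl(kcmfd, SIOCKCMATTACH, &attach);
 *
 *	struct kcm_clone clone_info;
 *	ioctl(kcmfd, SIOCKCMCLONE, &clone_info);
 *	// clone_info.fd is a new KCM socket on the same mux
 */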
static void free_mux(struct rcu_head *rcu)
{
	struct kcm_mux *mux = container_of(rcu,
					   struct kcm_mux, rcu);

	kmem_cache_free(kcm_muxp, mux);
}
static void release_mux(struct kcm_mux *mux)
{
	struct kcm_net *knet = mux->knet;
	struct kcm_psock *psock, *tmp_psock;

	/* Release psocks */
	list_for_each_entry_safe(psock, tmp_psock,
				 &mux->psocks, psock_list) {
		if (!WARN_ON(psock->unattaching))
			kcm_unattach(psock);
	}

	if (WARN_ON(mux->psocks_cnt))
		return;

	__skb_queue_purge(&mux->rx_hold_queue);

	mutex_lock(&knet->mutex);
	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
	aggregate_psock_stats(&mux->aggregate_psock_stats,
			      &knet->aggregate_psock_stats);
	aggregate_strp_stats(&mux->aggregate_strp_stats,
			     &knet->aggregate_strp_stats);
	list_del_rcu(&mux->kcm_mux_list);
	knet->count--;
	mutex_unlock(&knet->mutex);

	call_rcu(&mux->rcu, free_mux);
}
static void kcm_done(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct sock *sk = &kcm->sk;
	int socks_cnt;

	spin_lock_bh(&mux->rx_lock);
	if (kcm->rx_psock) {
		/* Cleanup in unreserve_rx_kcm */
		WARN_ON(kcm->done);
		kcm->rx_disabled = 1;
		kcm->done = 1;
		spin_unlock_bh(&mux->rx_lock);
		return;
	}

	if (kcm->rx_wait) {
		list_del(&kcm->wait_rx_list);
		/* paired with lockless reads in kcm_rfree() */
		WRITE_ONCE(kcm->rx_wait, false);
	}
	/* Move any pending receive messages to other kcm sockets */
	requeue_rx_msgs(mux, &sk->sk_receive_queue);

	spin_unlock_bh(&mux->rx_lock);

	if (WARN_ON(sk_rmem_alloc_get(sk)))
		return;

	/* Detach from MUX */
	spin_lock_bh(&mux->lock);

	list_del(&kcm->kcm_sock_list);
	mux->kcm_socks_cnt--;
	socks_cnt = mux->kcm_socks_cnt;

	spin_unlock_bh(&mux->lock);

	if (!socks_cnt) {
		/* We are done with the mux now. */
		release_mux(mux);
	}

	WARN_ON(kcm->rx_wait);

	sock_put(&kcm->sk);
}
/* Called by kcm_release to close a KCM socket.
 * If this is the last KCM socket on the MUX, destroy the MUX.
 */
static int kcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm;
	struct kcm_mux *mux;
	struct kcm_psock *psock;

	if (!sk)
		return 0;

	kcm = kcm_sk(sk);
	mux = kcm->mux;

	sock_orphan(sk);
	kfree_skb(kcm->seq_skb);

	lock_sock(sk);
	/* Purge queue under lock to avoid race condition with tx_work trying
	 * to act when queue is nonempty. If tx_work runs after this point
	 * it will just return.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
	 * get a writespace callback. This prevents further work being queued
	 * from the callback (unbinding the psock occurs after canceling work).
	 */
	kcm->tx_stopped = 1;

	release_sock(sk);

	spin_lock_bh(&mux->lock);
	if (kcm->tx_wait) {
		/* Take off tx_wait list, after this point there should be no
		 * way that a psock will be assigned to this kcm.
		 */
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
	}
	spin_unlock_bh(&mux->lock);

	/* Cancel work. After this point there should be no outside references
	 * to the kcm socket.
	 */
	cancel_work_sync(&kcm->tx_work);

	lock_sock(sk);
	psock = kcm->tx_psock;
	if (psock) {
		/* A psock was reserved, so we need to kill it since it
		 * may already have some bytes queued from a message. We
		 * need to do this after removing kcm from tx_wait list.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);
		unreserve_psock(kcm);
	}
	release_sock(sk);

	WARN_ON(kcm->tx_wait);
	WARN_ON(kcm->tx_psock);

	sock->sk = NULL;

	kcm_done(kcm);

	return 0;
}
static const struct proto_ops kcm_dgram_ops = {
	.family = PF_KCM,
	.owner = THIS_MODULE,
	.release = kcm_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = kcm_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = kcm_setsockopt,
	.getsockopt = kcm_getsockopt,
	.sendmsg = kcm_sendmsg,
	.recvmsg = kcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = kcm_sendpage,
};
static const struct proto_ops kcm_seqpacket_ops = {
	.family = PF_KCM,
	.owner = THIS_MODULE,
	.release = kcm_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = kcm_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = kcm_setsockopt,
	.getsockopt = kcm_getsockopt,
	.sendmsg = kcm_sendmsg,
	.recvmsg = kcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = kcm_sendpage,
	.splice_read = kcm_splice_read,
};
/* Create proto operation for kcm sockets */
static int kcm_create(struct net *net, struct socket *sock,
		      int protocol, int kern)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);
	struct sock *sk;
	struct kcm_mux *mux;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &kcm_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &kcm_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (protocol != KCMPROTO_CONNECTED)
		return -EPROTONOSUPPORT;

	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Allocate a kcm mux, shared between KCM sockets */
	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
	if (!mux) {
		sk_free(sk);
		return -ENOMEM;
	}

	spin_lock_init(&mux->lock);
	spin_lock_init(&mux->rx_lock);
	INIT_LIST_HEAD(&mux->kcm_socks);
	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
	INIT_LIST_HEAD(&mux->kcm_tx_waiters);

	INIT_LIST_HEAD(&mux->psocks);
	INIT_LIST_HEAD(&mux->psocks_ready);
	INIT_LIST_HEAD(&mux->psocks_avail);

	mux->knet = knet;

	/* Add new MUX to list */
	mutex_lock(&knet->mutex);
	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
	knet->count++;
	mutex_unlock(&knet->mutex);

	skb_queue_head_init(&mux->rx_hold_queue);

	/* Init KCM socket */
	sock_init_data(sock, sk);
	init_kcm_sock(kcm_sk(sk), mux);

	return 0;
}
static struct net_proto_family kcm_family_ops = {
	.family = PF_KCM,
	.create = kcm_create,
	.owner = THIS_MODULE,
};
static __net_init int kcm_init_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	INIT_LIST_HEAD_RCU(&knet->mux_list);
	mutex_init(&knet->mutex);

	return 0;
}
static __net_exit void kcm_exit_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	/* All KCM sockets should be closed at this point, which should mean
	 * that all multiplexors and psocks have been destroyed.
	 */
	WARN_ON(!list_empty(&knet->mux_list));
}
static struct pernet_operations kcm_net_ops = {
	.init = kcm_init_net,
	.exit = kcm_exit_net,
	.id   = &kcm_net_id,
	.size = sizeof(struct kcm_net),
};
static int __init kcm_init(void)
{
	int err = -ENOMEM;

	kcm_muxp = kmem_cache_create("kcm_mux_cache",
				     sizeof(struct kcm_mux), 0,
				     SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	if (!kcm_muxp)
		goto fail;

	kcm_psockp = kmem_cache_create("kcm_psock_cache",
				       sizeof(struct kcm_psock), 0,
				       SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	if (!kcm_psockp)
		goto fail;

	kcm_wq = create_singlethread_workqueue("kkcmd");
	if (!kcm_wq)
		goto fail;

	err = proto_register(&kcm_proto, 1);
	if (err)
		goto fail;

	err = register_pernet_device(&kcm_net_ops);
	if (err)
		goto net_ops_fail;

	err = sock_register(&kcm_family_ops);
	if (err)
		goto sock_register_fail;

	err = kcm_proc_init();
	if (err)
		goto proc_init_fail;

	return 0;

proc_init_fail:
	sock_unregister(PF_KCM);

sock_register_fail:
	unregister_pernet_device(&kcm_net_ops);

net_ops_fail:
	proto_unregister(&kcm_proto);

fail:
	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);

	if (kcm_wq)
		destroy_workqueue(kcm_wq);

	return err;
}
static void __exit kcm_exit(void)
{
	kcm_proc_exit();
	sock_unregister(PF_KCM);
	unregister_pernet_device(&kcm_net_ops);
	proto_unregister(&kcm_proto);
	destroy_workqueue(kcm_wq);

	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);
}
module_init(kcm_init);
module_exit(kcm_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);