2 * net/tipc/socket.c: TIPC socket API
4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
41 #include "name_table.h"
44 #include "name_distr.h"
50 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
51 #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
52 #define TIPC_FWD_MSG 1
53 #define TIPC_MAX_PORT 0xffffffff
54 #define TIPC_MIN_PORT 1
55 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
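/* Usage note (illustrative): with TIPC_ACK_RATE == 4, the receive paths in
 * tipc_recvmsg() and tipc_recvstream() below schedule a CONN_ACK via
 * tipc_sk_send_ack() once rcv_unacked reaches one quarter of the advertised
 * receive window, i.e. rcv_win / TIPC_ACK_RATE.
 */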
58 TIPC_LISTEN = TCP_LISTEN,
59 TIPC_ESTABLISHED = TCP_ESTABLISHED,
60 TIPC_OPEN = TCP_CLOSE,
61 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
62 TIPC_CONNECTING = TCP_SYN_SENT,
65 struct sockaddr_pair {
66 struct sockaddr_tipc sock;
67 struct sockaddr_tipc member;
71 * struct tipc_sock - TIPC socket structure
72 * @sk: socket - interacts with 'port' and with user via the socket API
73 * @conn_type: TIPC type used when connection was established
74 * @conn_instance: TIPC instance used when connection was established
75 * @published: non-zero if port has one or more associated names
76 * @max_pkt: maximum packet size "hint" used when building messages sent by port
77 * @portid: unique port identity in TIPC socket hash table
78 * @phdr: preformatted message header used when sending messages
79 * @cong_links: list of congested links
80 * @publications: list of publications for port
81 * @blocking_link: address of the congested link we are currently sleeping on
82 * @pub_count: total # of publications port has made during its lifetime
84 * @conn_timeout: the time we can wait for an unanswered connection setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links
87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
89 * @peer: 'connected' peer for dgram/rdm
90 * @node: hash table node
91 * @mc_method: cookie for use between socket and broadcast layer
92 * @rcu: rcu struct for tipc_sock
101 struct tipc_msg phdr;
102 struct list_head cong_links;
103 struct list_head publications;
106 atomic_t dupl_rcvcnt;
114 struct sockaddr_tipc peer;
115 struct rhash_head node;
116 struct tipc_mc_method mc_method;
118 struct tipc_group *group;
122 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
123 static void tipc_data_ready(struct sock *sk);
124 static void tipc_write_space(struct sock *sk);
125 static void tipc_sock_destruct(struct sock *sk);
126 static int tipc_release(struct socket *sock);
127 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
129 static void tipc_sk_timeout(struct timer_list *t);
130 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
131 struct tipc_name_seq const *seq);
132 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
133 struct tipc_name_seq const *seq);
134 static int tipc_sk_leave(struct tipc_sock *tsk);
135 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
136 static int tipc_sk_insert(struct tipc_sock *tsk);
137 static void tipc_sk_remove(struct tipc_sock *tsk);
138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
141 static const struct proto_ops packet_ops;
142 static const struct proto_ops stream_ops;
143 static const struct proto_ops msg_ops;
144 static struct proto tipc_proto;
145 static const struct rhashtable_params tsk_rht_params;
147 static u32 tsk_own_node(struct tipc_sock *tsk)
149 return msg_prevnode(&tsk->phdr);
152 static u32 tsk_peer_node(struct tipc_sock *tsk)
154 return msg_destnode(&tsk->phdr);
157 static u32 tsk_peer_port(struct tipc_sock *tsk)
159 return msg_destport(&tsk->phdr);
162 static bool tsk_unreliable(struct tipc_sock *tsk)
164 return msg_src_droppable(&tsk->phdr) != 0;
167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
172 static bool tsk_unreturnable(struct tipc_sock *tsk)
174 return msg_dest_droppable(&tsk->phdr) != 0;
177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
182 static int tsk_importance(struct tipc_sock *tsk)
184 return msg_importance(&tsk->phdr);
187 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
189 if (imp > TIPC_CRITICAL_IMPORTANCE)
191 msg_set_importance(&tsk->phdr, (u32)imp);
195 static struct tipc_sock *tipc_sk(const struct sock *sk)
197 return container_of(sk, struct tipc_sock, sk);
200 static bool tsk_conn_cong(struct tipc_sock *tsk)
202 return tsk->snt_unacked > tsk->snd_win;
205 static u16 tsk_blocks(int len)
207 return ((len / FLOWCTL_BLK_SZ) + 1);
210 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
211 * advertisable blocks, taking into account the ratio truesize(len)/len
212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
214 static u16 tsk_adv_blocks(int len)
216 return len / FLOWCTL_BLK_SZ / 4;
219 /* tsk_inc(): increment counter for sent or received data
220 * - If block based flow control is not supported by the peer we
221 * fall back to message-based flow control, incrementing the counter
223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
226 return ((msglen / FLOWCTL_BLK_SZ) + 1);
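/* Worked example of the block accounting above (illustrative; assumes
 * FLOWCTL_BLK_SZ is 1024 bytes, as defined in msg.h):
 *
 *   tsk_blocks(1500)      == 1500 / 1024 + 1  == 2   advertised units
 *   tsk_adv_blocks(65536) == 65536 / 1024 / 4 == 16  window units, the
 *       division by 4 covering the worst-case truesize(len)/len ratio
 *   tsk_inc(tsk, 1500)    == 2 when the peer supports TIPC_BLOCK_FLOWCTL,
 *       otherwise one unit per message (message-based fallback)
 */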
231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
233 * Caller must hold socket lock
235 static void tsk_advance_rx_queue(struct sock *sk)
237 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
240 /* tipc_sk_respond() : send response message back to sender
242 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
246 u32 onode = tipc_own_addr(sock_net(sk));
248 if (!tipc_msg_reverse(onode, &skb, err))
251 dnode = msg_destnode(buf_msg(skb));
252 selector = msg_origport(buf_msg(skb));
253 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
257 * tsk_rej_rx_queue - reject all buffers in socket receive queue
259 * Caller must hold socket lock
261 static void tsk_rej_rx_queue(struct sock *sk)
265 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
266 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
269 static bool tipc_sk_connected(struct sock *sk)
271 return sk->sk_state == TIPC_ESTABLISHED;
274 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
277 * Returns true if connectionless, false otherwise
279 static bool tipc_sk_type_connectionless(struct sock *sk)
281 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
284 /* tsk_peer_msg - verify if message was sent by connected port's peer
286 * Handles cases where the node's network address has changed from
287 * the default of <0.0.0> to its configured setting.
289 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
291 struct sock *sk = &tsk->sk;
292 u32 self = tipc_own_addr(sock_net(sk));
293 u32 peer_port = tsk_peer_port(tsk);
294 u32 orig_node, peer_node;
296 if (unlikely(!tipc_sk_connected(sk)))
299 if (unlikely(msg_origport(msg) != peer_port))
302 orig_node = msg_orignode(msg);
303 peer_node = tsk_peer_node(tsk);
305 if (likely(orig_node == peer_node))
308 if (!orig_node && peer_node == self)
311 if (!peer_node && orig_node == self)
317 /* tipc_set_sk_state - set the sk_state of the socket
320 * Caller must hold socket lock
322 * Returns 0 on success, errno otherwise
324 static int tipc_set_sk_state(struct sock *sk, int state)
326 int oldsk_state = sk->sk_state;
334 case TIPC_CONNECTING:
335 if (oldsk_state == TIPC_OPEN)
338 case TIPC_ESTABLISHED:
339 if (oldsk_state == TIPC_CONNECTING ||
340 oldsk_state == TIPC_OPEN)
343 case TIPC_DISCONNECTING:
344 if (oldsk_state == TIPC_CONNECTING ||
345 oldsk_state == TIPC_ESTABLISHED)
351 sk->sk_state = state;
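/* Quick reference for the transition checks above:
 *
 *   new state             accepted previous state(s)
 *   TIPC_CONNECTING       TIPC_OPEN
 *   TIPC_ESTABLISHED      TIPC_CONNECTING, TIPC_OPEN
 *   TIPC_DISCONNECTING    TIPC_CONNECTING, TIPC_ESTABLISHED
 *
 * Other combinations are rejected, per the kernel-doc above.
 */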
356 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
358 struct sock *sk = sock->sk;
359 int err = sock_error(sk);
360 int typ = sock->type;
364 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
365 if (sk->sk_state == TIPC_DISCONNECTING)
367 else if (!tipc_sk_connected(sk))
372 if (signal_pending(current))
373 return sock_intr_errno(*timeout);
378 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
380 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
384 while ((rc_ = !(condition_))) { \
385 /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
388 rc_ = tipc_sk_sock_err((sock_), timeo_); \
391 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
393 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
394 sched_annotate_sleep(); \
396 remove_wait_queue(sk_sleep(sk_), &wait_); \
402 * tipc_sk_create - create a TIPC socket
403 * @net: network namespace (must be default network)
404 * @sock: pre-allocated socket structure
405 * @protocol: protocol indicator (must be 0)
406 * @kern: caused by kernel or by userspace?
408 * This routine creates additional data structures used by the TIPC socket,
409 * initializes them, and links them together.
411 * Returns 0 on success, errno otherwise
413 static int tipc_sk_create(struct net *net, struct socket *sock,
414 int protocol, int kern)
416 const struct proto_ops *ops;
418 struct tipc_sock *tsk;
419 struct tipc_msg *msg;
421 /* Validate arguments */
422 if (unlikely(protocol != 0))
423 return -EPROTONOSUPPORT;
425 switch (sock->type) {
440 /* Allocate socket's protocol area */
441 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
446 tsk->max_pkt = MAX_PKT_DEFAULT;
447 INIT_LIST_HEAD(&tsk->publications);
448 INIT_LIST_HEAD(&tsk->cong_links);
451 /* Finish initializing socket data structures */
453 sock_init_data(sock, sk);
454 tipc_set_sk_state(sk, TIPC_OPEN);
455 if (tipc_sk_insert(tsk)) {
456 pr_warn("Socket create failed; port number exhausted\n");
460 /* Ensure tsk is visible before we read own_addr. */
463 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
464 TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
466 msg_set_origport(msg, tsk->portid);
467 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
469 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
470 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
471 sk->sk_data_ready = tipc_data_ready;
472 sk->sk_write_space = tipc_write_space;
473 sk->sk_destruct = tipc_sock_destruct;
474 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
475 tsk->group_is_open = true;
476 atomic_set(&tsk->dupl_rcvcnt, 0);
478 /* Start out with safe limits until we receive an advertised window */
479 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
480 tsk->rcv_win = tsk->snd_win;
482 if (tipc_sk_type_connectionless(sk)) {
483 tsk_set_unreturnable(tsk, true);
484 if (sock->type == SOCK_DGRAM)
485 tsk_set_unreliable(tsk, true);
491 static void tipc_sk_callback(struct rcu_head *head)
493 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
498 /* Caller should hold socket lock for the socket. */
499 static void __tipc_shutdown(struct socket *sock, int error)
501 struct sock *sk = sock->sk;
502 struct tipc_sock *tsk = tipc_sk(sk);
503 struct net *net = sock_net(sk);
504 long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
505 u32 dnode = tsk_peer_node(tsk);
508 /* Prevent hi-prio shutdown msgs from bypassing msgs in the link wakeup queue */
509 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
510 !tsk_conn_cong(tsk)));
512 /* Reject all unreceived messages, except on an active connection
513 * (which disconnects locally & sends a 'FIN+' to peer).
515 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
516 if (TIPC_SKB_CB(skb)->bytes_read) {
520 if (!tipc_sk_type_connectionless(sk) &&
521 sk->sk_state != TIPC_DISCONNECTING) {
522 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
523 tipc_node_remove_conn(net, dnode, tsk->portid);
525 tipc_sk_respond(sk, skb, error);
528 if (tipc_sk_type_connectionless(sk))
531 if (sk->sk_state != TIPC_DISCONNECTING) {
532 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
533 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
534 tsk_own_node(tsk), tsk_peer_port(tsk),
537 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
538 tipc_node_remove_conn(net, dnode, tsk->portid);
539 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
544 * tipc_release - destroy a TIPC socket
545 * @sock: socket to destroy
547 * This routine cleans up any messages that are still queued on the socket.
548 * For DGRAM and RDM socket types, all queued messages are rejected.
549 * For SEQPACKET and STREAM socket types, the first message is rejected
550 * and any others are discarded. (If the first message on a STREAM socket
551 * is partially-read, it is discarded and the next one is rejected instead.)
553 * NOTE: Rejected messages are not necessarily returned to the sender! They
554 * are returned or discarded according to the "destination droppable" setting
555 * specified for the message by the sender.
557 * Returns 0 on success, errno otherwise
559 static int tipc_release(struct socket *sock)
561 struct sock *sk = sock->sk;
562 struct tipc_sock *tsk;
565 * Exit if socket isn't fully initialized (occurs when a failed accept()
566 * releases a pre-allocated child socket that was never used)
574 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
575 sk->sk_shutdown = SHUTDOWN_MASK;
577 tipc_sk_withdraw(tsk, 0, NULL);
578 sk_stop_timer(sk, &sk->sk_timer);
582 /* Reject any messages that accumulated in backlog queue */
584 tipc_dest_list_purge(&tsk->cong_links);
585 tsk->cong_link_cnt = 0;
586 call_rcu(&tsk->rcu, tipc_sk_callback);
593 * tipc_bind - associate or disassociate TIPC name(s) with a socket
594 * @sock: socket structure
595 * @uaddr: socket address describing name(s) and desired operation
596 * @uaddr_len: size of socket address data structure
598 * Name and name sequence binding is indicated using a positive scope value;
599 * a negative scope value unbinds the specified name. Specifying no name
600 * (i.e. a socket address length of 0) unbinds all names from the socket.
602 * Returns 0 on success, errno otherwise
604 * NOTE: This routine doesn't need to take the socket lock since it doesn't
605 * access any non-constant socket information.
607 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
610 struct sock *sk = sock->sk;
611 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
612 struct tipc_sock *tsk = tipc_sk(sk);
616 if (unlikely(!uaddr_len)) {
617 res = tipc_sk_withdraw(tsk, 0, NULL);
624 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
628 if (addr->family != AF_TIPC) {
633 if (addr->addrtype == TIPC_ADDR_NAME)
634 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
635 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
640 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
641 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
642 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
647 res = (addr->scope >= 0) ?
648 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
649 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
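/* Minimal user-space sketch of the binding rules documented above
 * (illustrative only; error handling omitted, and service type 18888 is
 * an arbitrary value above TIPC_RESERVED_TYPES):
 *
 *   struct sockaddr_tipc a = {
 *           .family   = AF_TIPC,
 *           .addrtype = TIPC_ADDR_NAMESEQ,
 *           .scope    = TIPC_CLUSTER_SCOPE,
 *           .addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *   };
 *
 *   bind(sd, (struct sockaddr *)&a, sizeof(a));  // publish {18888, 0, 99}
 *   a.scope = -TIPC_CLUSTER_SCOPE;
 *   bind(sd, (struct sockaddr *)&a, sizeof(a));  // withdraw that name again
 *   bind(sd, (struct sockaddr *)&a, 0);          // zero length: withdraw all
 */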
656 * tipc_getname - get port ID of socket or peer socket
657 * @sock: socket structure
658 * @uaddr: area for returned socket address
659 * @uaddr_len: area for returned length of socket address
660 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
662 * Returns 0 on success, errno otherwise
664 * NOTE: This routine doesn't need to take the socket lock since it only
665 * accesses socket information that is unchanging (or which changes in
666 * a completely predictable manner).
668 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
671 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
672 struct sock *sk = sock->sk;
673 struct tipc_sock *tsk = tipc_sk(sk);
675 memset(addr, 0, sizeof(*addr));
677 if ((!tipc_sk_connected(sk)) &&
678 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
680 addr->addr.id.ref = tsk_peer_port(tsk);
681 addr->addr.id.node = tsk_peer_node(tsk);
683 addr->addr.id.ref = tsk->portid;
684 addr->addr.id.node = tipc_own_addr(sock_net(sk));
687 addr->addrtype = TIPC_ADDR_ID;
688 addr->family = AF_TIPC;
690 addr->addr.name.domain = 0;
692 return sizeof(*addr);
696 * tipc_poll - read and possibly block on pollmask
697 * @file: file structure associated with the socket
698 * @sock: socket for which to calculate the poll bits
701 * Returns pollmask value
704 * It appears that the usual socket locking mechanisms are not useful here
705 * since the pollmask info is potentially out-of-date the moment this routine
706 * exits. TCP and other protocols seem to rely on higher level poll routines
707 * to handle any preventable race conditions, so TIPC will do the same ...
709 * IMPORTANT: The fact that a read or write operation is indicated does NOT
710 * imply that the operation will succeed, merely that it should be performed
711 * and will not block.
713 static __poll_t tipc_poll(struct file *file, struct socket *sock,
716 struct sock *sk = sock->sk;
717 struct tipc_sock *tsk = tipc_sk(sk);
718 __poll_t revents = 0;
720 sock_poll_wait(file, sock, wait);
722 if (sk->sk_shutdown & RCV_SHUTDOWN)
723 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
724 if (sk->sk_shutdown == SHUTDOWN_MASK)
727 switch (sk->sk_state) {
728 case TIPC_ESTABLISHED:
729 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
733 case TIPC_CONNECTING:
734 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
735 revents |= EPOLLIN | EPOLLRDNORM;
738 if (tsk->group_is_open && !tsk->cong_link_cnt)
740 if (!tipc_sk_type_connectionless(sk))
742 if (skb_queue_empty_lockless(&sk->sk_receive_queue))
744 revents |= EPOLLIN | EPOLLRDNORM;
746 case TIPC_DISCONNECTING:
747 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
754 * tipc_sendmcast - send multicast message
755 * @sock: socket structure
756 * @seq: destination address
757 * @msg: message to send
758 * @dlen: length of data to send
759 * @timeout: timeout to wait for wakeup
761 * Called from function tipc_sendmsg(), which has done all sanity checks
762 * Returns the number of bytes sent on success, or errno
764 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
765 struct msghdr *msg, size_t dlen, long timeout)
767 struct sock *sk = sock->sk;
768 struct tipc_sock *tsk = tipc_sk(sk);
769 struct tipc_msg *hdr = &tsk->phdr;
770 struct net *net = sock_net(sk);
771 int mtu = tipc_bcast_get_mtu(net);
772 struct tipc_mc_method *method = &tsk->mc_method;
773 struct sk_buff_head pkts;
774 struct tipc_nlist dsts;
780 /* Block or return if any destination link is congested */
781 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
785 /* Lookup destination nodes */
786 tipc_nlist_init(&dsts, tipc_own_addr(net));
787 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
789 if (!dsts.local && !dsts.remote)
790 return -EHOSTUNREACH;
792 /* Build message header */
793 msg_set_type(hdr, TIPC_MCAST_MSG);
794 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
795 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
796 msg_set_destport(hdr, 0);
797 msg_set_destnode(hdr, 0);
798 msg_set_nametype(hdr, seq->type);
799 msg_set_namelower(hdr, seq->lower);
800 msg_set_nameupper(hdr, seq->upper);
802 /* Build message as chain of buffers */
803 __skb_queue_head_init(&pkts);
804 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
806 /* Send message if build was successful */
807 if (unlikely(rc == dlen))
808 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
809 &tsk->cong_link_cnt);
811 tipc_nlist_purge(&dsts);
813 return rc ? rc : dlen;
817 * tipc_send_group_msg - send a message to a member in the group
818 * @net: network namespace
819 * @m: message to send
821 * @dnode: destination node
822 * @dport: destination port
823 * @dlen: total length of message data
825 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
826 struct msghdr *m, struct tipc_member *mb,
827 u32 dnode, u32 dport, int dlen)
829 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
830 struct tipc_mc_method *method = &tsk->mc_method;
831 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
832 struct tipc_msg *hdr = &tsk->phdr;
833 struct sk_buff_head pkts;
836 /* Complete message header */
837 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
838 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
839 msg_set_destport(hdr, dport);
840 msg_set_destnode(hdr, dnode);
841 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
843 /* Build message as chain of buffers */
844 __skb_queue_head_init(&pkts);
845 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
846 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
847 if (unlikely(rc != dlen))
851 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
852 if (unlikely(rc == -ELINKCONG)) {
853 tipc_dest_push(&tsk->cong_links, dnode, 0);
854 tsk->cong_link_cnt++;
857 /* Update send window */
858 tipc_group_update_member(mb, blks);
860 /* A broadcast sent within next EXPIRE period must follow same path */
861 method->rcast = true;
862 method->mandatory = true;
867 * tipc_send_group_unicast - send message to a member in the group
868 * @sock: socket structure
869 * @m: message to send
870 * @dlen: total length of message data
871 * @timeout: timeout to wait for wakeup
873 * Called from function tipc_sendmsg(), which has done all sanity checks
874 * Returns the number of bytes sent on success, or errno
876 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
877 int dlen, long timeout)
879 struct sock *sk = sock->sk;
880 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
881 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
882 struct tipc_sock *tsk = tipc_sk(sk);
883 struct net *net = sock_net(sk);
884 struct tipc_member *mb = NULL;
888 node = dest->addr.id.node;
889 port = dest->addr.id.ref;
891 return -EHOSTUNREACH;
893 /* Block or return if destination link or member is congested */
894 rc = tipc_wait_for_cond(sock, &timeout,
895 !tipc_dest_find(&tsk->cong_links, node, 0) &&
897 !tipc_group_cong(tsk->group, node, port, blks,
903 return -EHOSTUNREACH;
905 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
907 return rc ? rc : dlen;
911 * tipc_send_group_anycast - send message to any member with given identity
912 * @sock: socket structure
913 * @m: message to send
914 * @dlen: total length of message data
915 * @timeout: timeout to wait for wakeup
917 * Called from function tipc_sendmsg(), which has done all sanity checks
918 * Returns the number of bytes sent on success, or errno
920 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
921 int dlen, long timeout)
923 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
924 struct sock *sk = sock->sk;
925 struct tipc_sock *tsk = tipc_sk(sk);
926 struct list_head *cong_links = &tsk->cong_links;
927 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
928 struct tipc_msg *hdr = &tsk->phdr;
929 struct tipc_member *first = NULL;
930 struct tipc_member *mbr = NULL;
931 struct net *net = sock_net(sk);
932 u32 node, port, exclude;
933 struct list_head dsts;
934 u32 type, inst, scope;
939 INIT_LIST_HEAD(&dsts);
941 type = msg_nametype(hdr);
942 inst = dest->addr.name.name.instance;
943 scope = msg_lookup_scope(hdr);
945 while (++lookups < 4) {
946 exclude = tipc_group_exclude(tsk->group);
950 /* Look for a non-congested destination member, if any */
952 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
953 &dstcnt, exclude, false))
954 return -EHOSTUNREACH;
955 tipc_dest_pop(&dsts, &node, &port);
956 cong = tipc_group_cong(tsk->group, node, port, blks,
966 /* Start over if destination was not in member list */
970 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
973 /* Block or return if destination link or member is congested */
974 rc = tipc_wait_for_cond(sock, &timeout,
975 !tipc_dest_find(cong_links, node, 0) &&
977 !tipc_group_cong(tsk->group, node, port,
982 /* Send, unless destination disappeared while waiting */
987 if (unlikely(lookups >= 4))
988 return -EHOSTUNREACH;
990 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
992 return rc ? rc : dlen;
996 * tipc_send_group_bcast - send message to all members in communication group
997 * @sock: socket structure
998 * @m: message to send
999 * @dlen: total length of message data
1000 * @timeout: timeout to wait for wakeup
1002 * Called from function tipc_sendmsg(), which has done all sanity checks
1003 * Returns the number of bytes sent on success, or errno
1005 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1006 int dlen, long timeout)
1008 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1009 struct sock *sk = sock->sk;
1010 struct net *net = sock_net(sk);
1011 struct tipc_sock *tsk = tipc_sk(sk);
1012 struct tipc_nlist *dsts;
1013 struct tipc_mc_method *method = &tsk->mc_method;
1014 bool ack = method->mandatory && method->rcast;
1015 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1016 struct tipc_msg *hdr = &tsk->phdr;
1017 int mtu = tipc_bcast_get_mtu(net);
1018 struct sk_buff_head pkts;
1019 int rc = -EHOSTUNREACH;
1021 /* Block or return if any destination link or member is congested */
1022 rc = tipc_wait_for_cond(sock, &timeout,
1023 !tsk->cong_link_cnt && tsk->group &&
1024 !tipc_group_bc_cong(tsk->group, blks));
1028 dsts = tipc_group_dests(tsk->group);
1029 if (!dsts->local && !dsts->remote)
1030 return -EHOSTUNREACH;
1032 /* Complete message header */
1034 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1035 msg_set_nameinst(hdr, dest->addr.name.name.instance);
1037 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1038 msg_set_nameinst(hdr, 0);
1040 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1041 msg_set_destport(hdr, 0);
1042 msg_set_destnode(hdr, 0);
1043 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1045 /* Avoid getting stuck with repeated forced replicasts */
1046 msg_set_grp_bc_ack_req(hdr, ack);
1048 /* Build message as chain of buffers */
1049 __skb_queue_head_init(&pkts);
1050 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1051 if (unlikely(rc != dlen))
1055 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1059 /* Update broadcast sequence number and send windows */
1060 tipc_group_update_bc_members(tsk->group, blks, ack);
1062 /* Broadcast link is now free to choose method for next broadcast */
1063 method->mandatory = false;
1064 method->expires = jiffies;
1070 * tipc_send_group_mcast - send message to all members with given identity
1071 * @sock: socket structure
1072 * @m: message to send
1073 * @dlen: total length of message data
1074 * @timeout: timeout to wait for wakeup
1076 * Called from function tipc_sendmsg(), which has done all sanity checks
1077 * Returns the number of bytes sent on success, or errno
1079 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1080 int dlen, long timeout)
1082 struct sock *sk = sock->sk;
1083 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1084 struct tipc_sock *tsk = tipc_sk(sk);
1085 struct tipc_group *grp = tsk->group;
1086 struct tipc_msg *hdr = &tsk->phdr;
1087 struct net *net = sock_net(sk);
1088 u32 type, inst, scope, exclude;
1089 struct list_head dsts;
1092 INIT_LIST_HEAD(&dsts);
1094 type = msg_nametype(hdr);
1095 inst = dest->addr.name.name.instance;
1096 scope = msg_lookup_scope(hdr);
1097 exclude = tipc_group_exclude(grp);
1099 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1100 &dstcnt, exclude, true))
1101 return -EHOSTUNREACH;
1104 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1105 return tipc_send_group_unicast(sock, m, dlen, timeout);
1108 tipc_dest_list_purge(&dsts);
1109 return tipc_send_group_bcast(sock, m, dlen, timeout);
1113 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1114 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1115 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1117 * Multi-threaded: parallel calls with reference to same queues may occur
1119 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1120 struct sk_buff_head *inputq)
1122 u32 self = tipc_own_addr(net);
1123 u32 type, lower, upper, scope;
1124 struct sk_buff *skb, *_skb;
1126 struct sk_buff_head tmpq;
1127 struct list_head dports;
1128 struct tipc_msg *hdr;
1129 int user, mtyp, hlen;
1132 __skb_queue_head_init(&tmpq);
1133 INIT_LIST_HEAD(&dports);
1135 skb = tipc_skb_peek(arrvq, &inputq->lock);
1136 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1138 user = msg_user(hdr);
1139 mtyp = msg_type(hdr);
1140 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1141 onode = msg_orignode(hdr);
1142 type = msg_nametype(hdr);
1144 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1145 spin_lock_bh(&inputq->lock);
1146 if (skb_peek(arrvq) == skb) {
1147 __skb_dequeue(arrvq);
1148 __skb_queue_tail(inputq, skb);
1151 spin_unlock_bh(&inputq->lock);
1155 /* Group messages require exact scope match */
1156 if (msg_in_group(hdr)) {
1159 scope = msg_lookup_scope(hdr);
1162 /* TIPC_NODE_SCOPE means "any scope" in this context */
1164 scope = TIPC_NODE_SCOPE;
1166 scope = TIPC_CLUSTER_SCOPE;
1168 lower = msg_namelower(hdr);
1169 upper = msg_nameupper(hdr);
1172 /* Create destination port list: */
1173 tipc_nametbl_mc_lookup(net, type, lower, upper,
1174 scope, exact, &dports);
1176 /* Clone message per destination */
1177 while (tipc_dest_pop(&dports, NULL, &portid)) {
1178 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1180 msg_set_destport(buf_msg(_skb), portid);
1181 __skb_queue_tail(&tmpq, _skb);
1184 pr_warn("Failed to clone mcast rcv buffer\n");
1186 /* Append to inputq if not already done by other thread */
1187 spin_lock_bh(&inputq->lock);
1188 if (skb_peek(arrvq) == skb) {
1189 skb_queue_splice_tail_init(&tmpq, inputq);
1190 /* Decrease the skb's refcnt, which was increased
1191 * in tipc_skb_peek()
1193 kfree_skb(__skb_dequeue(arrvq));
1195 spin_unlock_bh(&inputq->lock);
1196 __skb_queue_purge(&tmpq);
1199 tipc_sk_rcv(net, inputq);
1203 * tipc_sk_conn_proto_rcv - receive a connection manager protocol message
1204 * @tsk: receiving socket
1205 * @skb: pointer to message buffer.
1207 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1208 struct sk_buff_head *inputq,
1209 struct sk_buff_head *xmitq)
1211 struct tipc_msg *hdr = buf_msg(skb);
1212 u32 onode = tsk_own_node(tsk);
1213 struct sock *sk = &tsk->sk;
1214 int mtyp = msg_type(hdr);
1217 /* Ignore if connection cannot be validated: */
1218 if (!tsk_peer_msg(tsk, hdr))
1221 if (unlikely(msg_errcode(hdr))) {
1222 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1223 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1224 tsk_peer_port(tsk));
1225 sk->sk_state_change(sk);
1227 /* The state change is ignored if the socket is already awake;
1228 * convert msg to an abort msg and add it to the input queue
1230 msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1231 msg_set_type(hdr, TIPC_CONN_MSG);
1232 msg_set_size(hdr, BASIC_H_SIZE);
1233 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1234 __skb_queue_tail(inputq, skb);
1238 tsk->probe_unacked = false;
1240 if (mtyp == CONN_PROBE) {
1241 msg_set_type(hdr, CONN_PROBE_REPLY);
1242 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1243 __skb_queue_tail(xmitq, skb);
1245 } else if (mtyp == CONN_ACK) {
1246 conn_cong = tsk_conn_cong(tsk);
1247 tsk->snt_unacked -= msg_conn_ack(hdr);
1248 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1249 tsk->snd_win = msg_adv_win(hdr);
1251 sk->sk_write_space(sk);
1252 } else if (mtyp != CONN_PROBE_REPLY) {
1253 pr_warn("Received unknown CONN_PROTO msg\n");
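/* Summary of the connection protocol handling above: CONN_PROBE is answered
 * with a CONN_PROBE_REPLY, CONN_ACK releases acknowledged send credit (and,
 * for TIPC_BLOCK_FLOWCTL peers, refreshes snd_win from the advertised
 * window), and an incoming CONN_PROBE_REPLY needs no action beyond clearing
 * probe_unacked.
 */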
1260 * tipc_sendmsg - send message in connectionless manner
1261 * @sock: socket structure
1262 * @m: message to send
1263 * @dsz: amount of user data to be sent
1265 * Message must have a destination specified explicitly.
1266 * Used for SOCK_RDM and SOCK_DGRAM messages,
1267 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1268 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1270 * Returns the number of bytes sent on success, or errno otherwise
1272 static int tipc_sendmsg(struct socket *sock,
1273 struct msghdr *m, size_t dsz)
1275 struct sock *sk = sock->sk;
1279 ret = __tipc_sendmsg(sock, m, dsz);
1285 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1287 struct sock *sk = sock->sk;
1288 struct net *net = sock_net(sk);
1289 struct tipc_sock *tsk = tipc_sk(sk);
1290 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1291 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1292 struct list_head *clinks = &tsk->cong_links;
1293 bool syn = !tipc_sk_type_connectionless(sk);
1294 struct tipc_group *grp = tsk->group;
1295 struct tipc_msg *hdr = &tsk->phdr;
1296 struct tipc_name_seq *seq;
1297 struct sk_buff_head pkts;
1298 u32 dport, dnode = 0;
1302 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1306 if (unlikely(m->msg_namelen < sizeof(*dest)))
1308 if (unlikely(dest->family != AF_TIPC))
1314 return tipc_send_group_bcast(sock, m, dlen, timeout);
1315 if (dest->addrtype == TIPC_ADDR_NAME)
1316 return tipc_send_group_anycast(sock, m, dlen, timeout);
1317 if (dest->addrtype == TIPC_ADDR_ID)
1318 return tipc_send_group_unicast(sock, m, dlen, timeout);
1319 if (dest->addrtype == TIPC_ADDR_MCAST)
1320 return tipc_send_group_mcast(sock, m, dlen, timeout);
1324 if (unlikely(!dest)) {
1326 if (!syn && dest->family != AF_TIPC)
1327 return -EDESTADDRREQ;
1330 if (unlikely(syn)) {
1331 if (sk->sk_state == TIPC_LISTEN)
1333 if (sk->sk_state != TIPC_OPEN)
1337 if (dest->addrtype == TIPC_ADDR_NAME) {
1338 tsk->conn_type = dest->addr.name.name.type;
1339 tsk->conn_instance = dest->addr.name.name.instance;
1343 seq = &dest->addr.nameseq;
1344 if (dest->addrtype == TIPC_ADDR_MCAST)
1345 return tipc_sendmcast(sock, seq, m, dlen, timeout);
1347 if (dest->addrtype == TIPC_ADDR_NAME) {
1348 type = dest->addr.name.name.type;
1349 inst = dest->addr.name.name.instance;
1350 dnode = dest->addr.name.domain;
1351 msg_set_type(hdr, TIPC_NAMED_MSG);
1352 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1353 msg_set_nametype(hdr, type);
1354 msg_set_nameinst(hdr, inst);
1355 msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1356 dport = tipc_nametbl_translate(net, type, inst, &dnode);
1357 msg_set_destnode(hdr, dnode);
1358 msg_set_destport(hdr, dport);
1359 if (unlikely(!dport && !dnode))
1360 return -EHOSTUNREACH;
1361 } else if (dest->addrtype == TIPC_ADDR_ID) {
1362 dnode = dest->addr.id.node;
1363 msg_set_type(hdr, TIPC_DIRECT_MSG);
1364 msg_set_lookup_scope(hdr, 0);
1365 msg_set_destnode(hdr, dnode);
1366 msg_set_destport(hdr, dest->addr.id.ref);
1367 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1372 /* Block or return if destination link is congested */
1373 rc = tipc_wait_for_cond(sock, &timeout,
1374 !tipc_dest_find(clinks, dnode, 0));
1378 __skb_queue_head_init(&pkts);
1379 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1380 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1381 if (unlikely(rc != dlen))
1384 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1385 if (unlikely(rc == -ELINKCONG)) {
1386 tipc_dest_push(clinks, dnode, 0);
1387 tsk->cong_link_cnt++;
1391 if (unlikely(syn && !rc))
1392 tipc_set_sk_state(sk, TIPC_CONNECTING);
1394 return rc ? rc : dlen;
1398 * tipc_sendstream - send stream-oriented data
1399 * @sock: socket structure
1401 * @dsz: total length of data to be transmitted
1403 * Used for SOCK_STREAM data.
1405 * Returns the number of bytes sent on success (or partial success),
1406 * or errno if no data sent
1408 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1410 struct sock *sk = sock->sk;
1414 ret = __tipc_sendstream(sock, m, dsz);
1420 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1422 struct sock *sk = sock->sk;
1423 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1424 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1425 struct tipc_sock *tsk = tipc_sk(sk);
1426 struct tipc_msg *hdr = &tsk->phdr;
1427 struct net *net = sock_net(sk);
1428 struct sk_buff_head pkts;
1429 u32 dnode = tsk_peer_node(tsk);
1433 __skb_queue_head_init(&pkts);
1435 if (unlikely(dlen > INT_MAX))
1438 /* Handle implicit connection setup */
1439 if (unlikely(dest)) {
1440 rc = __tipc_sendmsg(sock, m, dlen);
1441 if (dlen && dlen == rc) {
1442 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1443 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1449 rc = tipc_wait_for_cond(sock, &timeout,
1450 (!tsk->cong_link_cnt &&
1451 !tsk_conn_cong(tsk) &&
1452 tipc_sk_connected(sk)));
1456 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1457 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1458 if (unlikely(rc != send))
1461 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1462 if (unlikely(rc == -ELINKCONG)) {
1463 tsk->cong_link_cnt = 1;
1467 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1470 } while (sent < dlen && !rc);
1472 return sent ? sent : rc;
1476 * tipc_send_packet - send a connection-oriented message
1477 * @sock: socket structure
1478 * @m: message to send
1479 * @dsz: length of data to be transmitted
1481 * Used for SOCK_SEQPACKET messages.
1483 * Returns the number of bytes sent on success, or errno otherwise
1485 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1487 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1490 return tipc_sendstream(sock, m, dsz);
1493 /* tipc_sk_finish_conn - complete the setup of a connection
1495 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1498 struct sock *sk = &tsk->sk;
1499 struct net *net = sock_net(sk);
1500 struct tipc_msg *msg = &tsk->phdr;
1502 msg_set_destnode(msg, peer_node);
1503 msg_set_destport(msg, peer_port);
1504 msg_set_type(msg, TIPC_CONN_MSG);
1505 msg_set_lookup_scope(msg, 0);
1506 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1508 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1509 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1510 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1511 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1512 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1513 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1516 /* Fall back to message based flow control */
1517 tsk->rcv_win = FLOWCTL_MSG_WIN;
1518 tsk->snd_win = FLOWCTL_MSG_WIN;
1522 * tipc_sk_set_orig_addr - capture sender's address for received message
1523 * @m: descriptor for message info
1524 * @hdr: received message header
1526 * Note: Address is not captured if not requested by receiver.
1528 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1530 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1531 struct tipc_msg *hdr = buf_msg(skb);
1536 srcaddr->sock.family = AF_TIPC;
1537 srcaddr->sock.addrtype = TIPC_ADDR_ID;
1538 srcaddr->sock.scope = 0;
1539 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1540 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1541 srcaddr->sock.addr.name.domain = 0;
1542 m->msg_namelen = sizeof(struct sockaddr_tipc);
1544 if (!msg_in_group(hdr))
1547 /* Group message users may also want to know sending member's id */
1548 srcaddr->member.family = AF_TIPC;
1549 srcaddr->member.addrtype = TIPC_ADDR_NAME;
1550 srcaddr->member.scope = 0;
1551 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1552 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1553 srcaddr->member.addr.name.domain = 0;
1554 m->msg_namelen = sizeof(*srcaddr);
1558 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1559 * @m: descriptor for message info
1560 * @skb: received message buffer
1561 * @tsk: TIPC port associated with message
1563 * Note: Ancillary data is not captured if not requested by receiver.
1565 * Returns 0 if successful, otherwise errno
1567 static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1568 struct tipc_sock *tsk)
1570 struct tipc_msg *msg;
1577 if (likely(m->msg_controllen == 0))
1581 /* Optionally capture errored message object(s) */
1582 err = msg ? msg_errcode(msg) : 0;
1583 if (unlikely(err)) {
1585 anc_data[1] = msg_data_sz(msg);
1586 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1590 if (skb_linearize(skb))
1593 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1600 /* Optionally capture message destination object */
1601 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1602 switch (dest_type) {
1603 case TIPC_NAMED_MSG:
1605 anc_data[0] = msg_nametype(msg);
1606 anc_data[1] = msg_namelower(msg);
1607 anc_data[2] = msg_namelower(msg);
1609 case TIPC_MCAST_MSG:
1611 anc_data[0] = msg_nametype(msg);
1612 anc_data[1] = msg_namelower(msg);
1613 anc_data[2] = msg_nameupper(msg);
1616 has_name = (tsk->conn_type != 0);
1617 anc_data[0] = tsk->conn_type;
1618 anc_data[1] = tsk->conn_instance;
1619 anc_data[2] = tsk->conn_instance;
1625 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
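/* User-space sketch of consuming the ancillary objects produced above
 * (illustrative only; assumes 'msg' is the struct msghdr handed to
 * recvmsg() with a cmsg buffer supplied in msg_control):
 *
 *   struct cmsghdr *cm;
 *
 *   for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *           __u32 *v = (__u32 *)CMSG_DATA(cm);
 *
 *           if (cm->cmsg_level != SOL_TIPC)
 *                   continue;
 *           if (cm->cmsg_type == TIPC_ERRINFO)
 *                   printf("error %u, %u bytes of data returned\n",
 *                          v[0], v[1]);
 *           else if (cm->cmsg_type == TIPC_DESTNAME)
 *                   printf("destination name {%u,%u,%u}\n",
 *                          v[0], v[1], v[2]);
 *           // TIPC_RETDATA carries the rejected payload itself
 *   }
 */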
1633 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1635 struct sock *sk = &tsk->sk;
1636 struct net *net = sock_net(sk);
1637 struct sk_buff *skb = NULL;
1638 struct tipc_msg *msg;
1639 u32 peer_port = tsk_peer_port(tsk);
1640 u32 dnode = tsk_peer_node(tsk);
1642 if (!tipc_sk_connected(sk))
1644 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1645 dnode, tsk_own_node(tsk), peer_port,
1646 tsk->portid, TIPC_OK);
1650 msg_set_conn_ack(msg, tsk->rcv_unacked);
1651 tsk->rcv_unacked = 0;
1653 /* Adjust to and advertise the correct window limit */
1654 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1655 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1656 msg_set_adv_win(msg, tsk->rcv_win);
1658 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1661 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1663 struct sock *sk = sock->sk;
1665 long timeo = *timeop;
1666 int err = sock_error(sk);
1672 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1673 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1674 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1679 timeo = schedule_timeout(timeo);
1683 if (!skb_queue_empty(&sk->sk_receive_queue))
1688 err = sock_intr_errno(timeo);
1689 if (signal_pending(current))
1692 err = sock_error(sk);
1696 finish_wait(sk_sleep(sk), &wait);
1702 * tipc_recvmsg - receive packet-oriented message
1703 * @m: descriptor for message info
1704 * @buflen: length of user buffer area
1705 * @flags: receive flags
1707 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1708 * If the complete message doesn't fit in user area, truncate it.
1710 * Returns size of returned message data, errno otherwise
1712 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1713 size_t buflen, int flags)
1715 struct sock *sk = sock->sk;
1716 bool connected = !tipc_sk_type_connectionless(sk);
1717 struct tipc_sock *tsk = tipc_sk(sk);
1718 int rc, err, hlen, dlen, copy;
1719 struct tipc_skb_cb *skb_cb;
1720 struct sk_buff_head xmitq;
1721 struct tipc_msg *hdr;
1722 struct sk_buff *skb;
1726 /* Catch invalid receive requests */
1727 if (unlikely(!buflen))
1731 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1735 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1737 /* Step rcv queue to first msg with data or error; wait if necessary */
1739 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1742 skb = skb_peek(&sk->sk_receive_queue);
1743 skb_cb = TIPC_SKB_CB(skb);
1745 dlen = msg_data_sz(hdr);
1746 hlen = msg_hdr_sz(hdr);
1747 err = msg_errcode(hdr);
1748 grp_evt = msg_is_grp_evt(hdr);
1749 if (likely(dlen || err))
1751 tsk_advance_rx_queue(sk);
1754 /* Collect msg meta data, including error code and rejected data */
1755 tipc_sk_set_orig_addr(m, skb);
1756 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1761 /* Capture data if non-error msg, otherwise just set return value */
1763 int offset = skb_cb->bytes_read;
1765 copy = min_t(int, dlen - offset, buflen);
1766 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1769 if (unlikely(offset + copy < dlen)) {
1770 if (flags & MSG_EOR) {
1771 if (!(flags & MSG_PEEK))
1772 skb_cb->bytes_read = offset + copy;
1774 m->msg_flags |= MSG_TRUNC;
1775 skb_cb->bytes_read = 0;
1778 if (flags & MSG_EOR)
1779 m->msg_flags |= MSG_EOR;
1780 skb_cb->bytes_read = 0;
1785 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
1791 /* Mark message as group event if applicable */
1792 if (unlikely(grp_evt)) {
1793 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1794 m->msg_flags |= MSG_EOR;
1795 m->msg_flags |= MSG_OOB;
1799 /* Capture of data or error code/rejected data was successful */
1800 if (unlikely(flags & MSG_PEEK))
1803 /* Send group flow control advertisement when applicable */
1804 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1805 __skb_queue_head_init(&xmitq);
1806 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1807 msg_orignode(hdr), msg_origport(hdr),
1809 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1812 if (skb_cb->bytes_read)
1815 tsk_advance_rx_queue(sk);
1817 if (likely(!connected))
1820 /* Send connection flow control advertisement when applicable */
1821 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1822 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1823 tipc_sk_send_ack(tsk);
1826 return rc ? rc : copy;
1830 * tipc_recvstream - receive stream-oriented data
1831 * @m: descriptor for message info
1832 * @buflen: total size of user buffer area
1833 * @flags: receive flags
1835 * Used for SOCK_STREAM messages only. If not enough data is available,
1836 * it will optionally wait for more; data is never truncated.
1838 * Returns size of returned message data, errno otherwise
1840 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1841 size_t buflen, int flags)
1843 struct sock *sk = sock->sk;
1844 struct tipc_sock *tsk = tipc_sk(sk);
1845 struct sk_buff *skb;
1846 struct tipc_msg *hdr;
1847 struct tipc_skb_cb *skb_cb;
1848 bool peek = flags & MSG_PEEK;
1849 int offset, required, copy, copied = 0;
1850 int hlen, dlen, err, rc;
1853 /* Catch invalid receive attempts */
1854 if (unlikely(!buflen))
1859 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1863 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1864 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1867 /* Look at first msg in receive queue; wait if necessary */
1868 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1871 skb = skb_peek(&sk->sk_receive_queue);
1872 skb_cb = TIPC_SKB_CB(skb);
1874 dlen = msg_data_sz(hdr);
1875 hlen = msg_hdr_sz(hdr);
1876 err = msg_errcode(hdr);
1878 /* Discard any empty non-errored (SYN-) message */
1879 if (unlikely(!dlen && !err)) {
1880 tsk_advance_rx_queue(sk);
1884 /* Collect msg meta data, incl. error code and rejected data */
1886 tipc_sk_set_orig_addr(m, skb);
1887 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1893 /* Copy data if msg ok, otherwise return error/partial data */
1895 offset = skb_cb->bytes_read;
1896 copy = min_t(int, dlen - offset, buflen - copied);
1897 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1902 if (unlikely(offset < dlen)) {
1904 skb_cb->bytes_read = offset;
1909 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1918 tsk_advance_rx_queue(sk);
1920 /* Send connection flow control advertisement when applicable */
1921 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1922 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1923 tipc_sk_send_ack(tsk);
1925 /* Exit if all requested data or FIN/error received */
1926 if (copied == buflen || err)
1929 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1932 return copied ? copied : rc;
1936 * tipc_write_space - wake up thread if port congestion is released
1939 static void tipc_write_space(struct sock *sk)
1941 struct socket_wq *wq;
1944 wq = rcu_dereference(sk->sk_wq);
1945 if (skwq_has_sleeper(wq))
1946 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1947 EPOLLWRNORM | EPOLLWRBAND);
1952 * tipc_data_ready - wake up threads to indicate messages have been received
1954 * @sk: socket with new data in its receive queue
1956 static void tipc_data_ready(struct sock *sk)
1958 struct socket_wq *wq;
1961 wq = rcu_dereference(sk->sk_wq);
1962 if (skwq_has_sleeper(wq))
1963 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1964 EPOLLRDNORM | EPOLLRDBAND);
1968 static void tipc_sock_destruct(struct sock *sk)
1970 __skb_queue_purge(&sk->sk_receive_queue);
1973 static void tipc_sk_proto_rcv(struct sock *sk,
1974 struct sk_buff_head *inputq,
1975 struct sk_buff_head *xmitq)
1977 struct sk_buff *skb = __skb_dequeue(inputq);
1978 struct tipc_sock *tsk = tipc_sk(sk);
1979 struct tipc_msg *hdr = buf_msg(skb);
1980 struct tipc_group *grp = tsk->group;
1981 bool wakeup = false;
1983 switch (msg_user(hdr)) {
1985 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1988 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1989 /* coupled with smp_rmb() in tipc_wait_for_cond() */
1991 tsk->cong_link_cnt--;
1994 case GROUP_PROTOCOL:
1995 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1998 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1999 hdr, inputq, xmitq);
2006 sk->sk_write_space(sk);
2012 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket
2014 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
2016 * Returns true if everything ok, false otherwise
2018 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
2020 struct sock *sk = &tsk->sk;
2021 struct net *net = sock_net(sk);
2022 struct tipc_msg *hdr = buf_msg(skb);
2023 u32 pport = msg_origport(hdr);
2024 u32 pnode = msg_orignode(hdr);
2026 if (unlikely(msg_mcast(hdr)))
2029 switch (sk->sk_state) {
2030 case TIPC_CONNECTING:
2031 /* Accept only ACK or NACK message */
2032 if (unlikely(!msg_connected(hdr))) {
2033 if (pport != tsk_peer_port(tsk) ||
2034 pnode != tsk_peer_node(tsk))
2037 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2038 sk->sk_err = ECONNREFUSED;
2039 sk->sk_state_change(sk);
2043 if (unlikely(msg_errcode(hdr))) {
2044 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2045 sk->sk_err = ECONNREFUSED;
2046 sk->sk_state_change(sk);
2050 if (unlikely(!msg_isdata(hdr))) {
2051 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2052 sk->sk_err = EINVAL;
2053 sk->sk_state_change(sk);
2057 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
2058 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2060 /* If 'ACK+' message, add to socket receive queue */
2061 if (msg_data_sz(hdr))
2064 /* If empty 'ACK-' message, wake up sleeping connect() */
2065 sk->sk_state_change(sk);
2067 /* 'ACK-' message is neither accepted nor rejected: */
2068 msg_set_dest_droppable(hdr, 1);
2072 case TIPC_DISCONNECTING:
2075 /* Accept only SYN message */
2076 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
2079 case TIPC_ESTABLISHED:
2080 /* Accept only connection-based messages sent by peer */
2081 if (unlikely(!tsk_peer_msg(tsk, hdr)))
2084 if (unlikely(msg_errcode(hdr))) {
2085 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2086 /* Let timer expire on its own */
2087 tipc_node_remove_conn(net, tsk_peer_node(tsk),
2089 sk->sk_state_change(sk);
2093 pr_err("Unknown sk_state %u\n", sk->sk_state);
2100 * rcvbuf_limit - get proper overload limit of socket receive queue
2104 * For connection-oriented messages, irrespective of importance,
2105 * the default queue limit is 2 MB.
2107 * For connectionless messages, queue limits are based on message
2108 * importance as follows:
2110 * TIPC_LOW_IMPORTANCE (2 MB)
2111 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2112 * TIPC_HIGH_IMPORTANCE (8 MB)
2113 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2115 * Returns overload limit according to corresponding message importance
2117 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2119 struct tipc_sock *tsk = tipc_sk(sk);
2120 struct tipc_msg *hdr = buf_msg(skb);
2122 if (unlikely(msg_in_group(hdr)))
2123 return sk->sk_rcvbuf;
2125 if (unlikely(!msg_connected(hdr)))
2126 return sk->sk_rcvbuf << msg_importance(hdr);
2128 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2129 return sk->sk_rcvbuf;
2131 return FLOWCTL_MSG_LIM;
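/* Worked example for rcvbuf_limit() (illustrative; assumes the importance
 * levels are numbered LOW=0 .. CRITICAL=3 and sk_rcvbuf starts at the
 * 2 MB default mentioned in the kernel-doc above):
 *
 *   group member message                       sk_rcvbuf       ==  2 MB
 *   connectionless, TIPC_LOW_IMPORTANCE        sk_rcvbuf << 0  ==  2 MB
 *   connectionless, TIPC_MEDIUM_IMPORTANCE     sk_rcvbuf << 1  ==  4 MB
 *   connectionless, TIPC_CRITICAL_IMPORTANCE   sk_rcvbuf << 3  == 16 MB
 *   connected, peer with TIPC_BLOCK_FLOWCTL    sk_rcvbuf       ==  2 MB
 *   connected, legacy peer                     FLOWCTL_MSG_LIM
 */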
2135 * tipc_sk_filter_rcv - validate incoming message
2137 * @skb: pointer to message.
2139 * Enqueues message on receive queue if acceptable; optionally handles
2140 * disconnect indication for a connected socket.
2142 * Called with socket lock already taken
2145 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2146 struct sk_buff_head *xmitq)
2148 bool sk_conn = !tipc_sk_type_connectionless(sk);
2149 struct tipc_sock *tsk = tipc_sk(sk);
2150 struct tipc_group *grp = tsk->group;
2151 struct tipc_msg *hdr = buf_msg(skb);
2152 struct net *net = sock_net(sk);
2153 struct sk_buff_head inputq;
2154 int limit, err = TIPC_OK;
2156 TIPC_SKB_CB(skb)->bytes_read = 0;
2157 __skb_queue_head_init(&inputq);
2158 __skb_queue_tail(&inputq, skb);
2160 if (unlikely(!msg_isdata(hdr)))
2161 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2164 tipc_group_filter_msg(grp, &inputq, xmitq);
2166 /* Validate and add to receive buffer if there is space */
2167 while ((skb = __skb_dequeue(&inputq))) {
2169 limit = rcvbuf_limit(sk, skb);
2170 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2171 (!sk_conn && msg_connected(hdr)) ||
2172 (!grp && msg_in_group(hdr)))
2173 err = TIPC_ERR_NO_PORT;
2174 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2175 atomic_inc(&sk->sk_drops);
2176 err = TIPC_ERR_OVERLOAD;
2179 if (unlikely(err)) {
2180 tipc_skb_reject(net, err, skb, xmitq);
2184 __skb_queue_tail(&sk->sk_receive_queue, skb);
2185 skb_set_owner_r(skb, sk);
2186 sk->sk_data_ready(sk);
2191 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2195 * Caller must hold socket lock
2197 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2199 unsigned int before = sk_rmem_alloc_get(sk);
2200 struct sk_buff_head xmitq;
2203 __skb_queue_head_init(&xmitq);
2205 tipc_sk_filter_rcv(sk, skb, &xmitq);
2206 added = sk_rmem_alloc_get(sk) - before;
2207 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2209 /* Send pending response/rejected messages, if any */
2210 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2215 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2216 * inputq and try adding them to socket or backlog queue
2217 * @inputq: list of incoming buffers with potentially different destinations
2218 * @sk: socket where the buffers should be enqueued
2219 * @dport: port number for the socket
2221 * Caller must hold socket lock
2223 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2224 u32 dport, struct sk_buff_head *xmitq)
2226 unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
2227 struct sk_buff *skb;
2232 while (skb_queue_len(inputq)) {
2233 if (unlikely(time_after_eq(jiffies, time_limit)))
2236 skb = tipc_skb_dequeue(inputq, dport);
2240 /* Add message directly to receive queue if possible */
2241 if (!sock_owned_by_user(sk)) {
2242 tipc_sk_filter_rcv(sk, skb, xmitq);
2246 /* Try backlog, compensating for double-counted bytes */
2247 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2248 if (!sk->sk_backlog.len)
2249 atomic_set(dcnt, 0);
2250 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2251 if (likely(!sk_add_backlog(sk, skb, lim)))
2254 /* Overload => reject message back to sender */
2255 onode = tipc_own_addr(sock_net(sk));
2256 atomic_inc(&sk->sk_drops);
2257 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
2258 __skb_queue_tail(xmitq, skb);
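/*
 * The time_limit above bounds how long one call may keep feeding a
 * single socket: usecs_to_jiffies(20000) is roughly a 20 ms budget,
 * after which the loop gives up and leaves the remaining buffers on
 * inputq for a later pass, so one busy destination cannot starve the
 * other ports sharing the input queue.
 */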
2264 * tipc_sk_rcv - handle a chain of incoming buffers
2265 * @inputq: buffer list containing the buffers
2266 * Consumes all buffers in list until inputq is empty
2267 * Note: may be called in multiple threads referring to the same queue
2269 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2271 struct sk_buff_head xmitq;
2272 u32 dnode, dport = 0;
2274 struct tipc_sock *tsk;
2276 struct sk_buff *skb;
2278 __skb_queue_head_init(&xmitq);
2279 while (skb_queue_len(inputq)) {
2280 dport = tipc_skb_peek_port(inputq, dport);
2281 tsk = tipc_sk_lookup(net, dport);
2285 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2286 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2287 spin_unlock_bh(&sk->sk_lock.slock);
2289 /* Send pending response/rejected messages, if any */
2290 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2294 /* No destination socket => dequeue skb if still there */
2295 skb = tipc_skb_dequeue(inputq, dport);
2299 /* Try secondary lookup if unresolved named message */
2300 err = TIPC_ERR_NO_PORT;
2301 if (tipc_msg_lookup_dest(net, skb, &err))
2304 /* Prepare for message rejection */
2305 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2308 dnode = msg_destnode(buf_msg(skb));
2309 tipc_node_xmit_skb(net, skb, dnode, dport);
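/*
 * Taken together, the receive path looks roughly like this:
 *
 *	tipc_sk_rcv()             resolve destination port, try slock
 *	  tipc_sk_enqueue()       pull out buffers for that port
 *	    tipc_sk_filter_rcv()    if the socket is not owned by user
 *	    sk_add_backlog()        otherwise, with dupl_rcvcnt headroom
 *	  tipc_node_distr_xmit()  flush any protocol/reject messages
 *
 * Buffers that cannot be delivered are either redirected via
 * tipc_msg_lookup_dest() or reversed and sent back to the originator.
 */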
2313 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2315 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2316 struct sock *sk = sock->sk;
2320 int err = sock_error(sk);
2325 if (signal_pending(current))
2326 return sock_intr_errno(*timeo_p);
2328 add_wait_queue(sk_sleep(sk), &wait);
2329 done = sk_wait_event(sk, timeo_p,
2330 sk->sk_state != TIPC_CONNECTING, &wait);
2331 remove_wait_queue(sk_sleep(sk), &wait);
2336 static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2338 if (addr->family != AF_TIPC)
2340 if (addr->addrtype == TIPC_SERVICE_RANGE)
2341 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2342 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2343 addr->addrtype == TIPC_SOCKET_ADDR);
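/*
 * A sketch of an address this check accepts, assuming the usual uapi
 * layout of struct sockaddr_tipc (field names as in linux/tipc.h); the
 * service type 4711 is only a placeholder:
 *
 *	struct sockaddr_tipc a = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_RANGE,
 *		.addr.nameseq = { .type = 4711, .lower = 0, .upper = 99 },
 *	};
 *
 * Any range with lower <= upper passes, while lower > upper is
 * rejected, as are address types other than the three listed above.
 */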
2347 * tipc_connect - establish a connection to another TIPC port
2348 * @sock: socket structure
2349 * @dest: socket address for destination port
2350 * @destlen: size of socket address data structure
2351 * @flags: file-related flags associated with socket
2353 * Returns 0 on success, errno otherwise
2355 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2356 int destlen, int flags)
2358 struct sock *sk = sock->sk;
2359 struct tipc_sock *tsk = tipc_sk(sk);
2360 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2361 struct msghdr m = {NULL,};
2362 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2366 if (destlen != sizeof(struct sockaddr_tipc))
2376 if (dst->family == AF_UNSPEC) {
2377 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2378 if (!tipc_sk_type_connectionless(sk))
2382 if (!tipc_sockaddr_is_sane(dst)) {
2386 /* DGRAM/RDM connect(), just save the destaddr */
2387 if (tipc_sk_type_connectionless(sk)) {
2388 memcpy(&tsk->peer, dest, destlen);
2390 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2395 previous = sk->sk_state;
2397 switch (sk->sk_state) {
2399 /* Send a 'SYN-' to destination */
2401 m.msg_namelen = destlen;
2403 /* If connect() is non-blocking, set MSG_DONTWAIT so that
2404 * __tipc_sendmsg() never blocks.
2407 m.msg_flags = MSG_DONTWAIT;
2409 res = __tipc_sendmsg(sock, &m, 0);
2410 if ((res < 0) && (res != -EWOULDBLOCK))
2413 /* Just entered TIPC_CONNECTING state; the only
2414 * difference is that the return value in the
2415 * non-blocking case is EINPROGRESS rather than EALREADY.
2419 case TIPC_CONNECTING:
2421 if (previous == TIPC_CONNECTING)
2425 timeout = msecs_to_jiffies(timeout);
2426 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2427 res = tipc_wait_for_connect(sock, &timeout);
2429 case TIPC_ESTABLISHED:
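/*
 * A minimal sketch of how userspace might drive this path, assuming a
 * SOCK_STREAM TIPC socket and a published service address; the type
 * and instance values (4711/17) are placeholders only:
 *
 *	struct sockaddr_tipc peer = {
 *		.family   = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name = { .type = 4711, .instance = 17 },
 *	};
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *	connect(sd, (struct sockaddr *)&peer, sizeof(peer));
 *
 * A blocking connect() returns 0 once the SYN/ACK exchange above has
 * completed; with O_NONBLOCK it fails with EINPROGRESS instead and the
 * caller typically polls for writability.
 */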
2442 * tipc_listen - allow socket to listen for incoming connections
2443 * @sock: socket structure
2446 * Returns 0 on success, errno otherwise
2448 static int tipc_listen(struct socket *sock, int len)
2450 struct sock *sk = sock->sk;
2454 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2460 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2462 struct sock *sk = sock->sk;
2463 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2466 /* True wake-one mechanism for incoming connections: only
2467 * one process gets woken up, not the 'whole herd'.
2468 * Since we do not 'race & poll' for established sockets
2469 * anymore, the common case will execute the loop only once.
2472 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2473 add_wait_queue(sk_sleep(sk), &wait);
2475 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
2477 remove_wait_queue(sk_sleep(sk), &wait);
2480 if (!skb_queue_empty(&sk->sk_receive_queue))
2485 err = sock_intr_errno(timeo);
2486 if (signal_pending(current))
2493 * tipc_accept - wait for connection request
2494 * @sock: listening socket
2495 * @newsock: new socket that is to be connected
2496 * @flags: file-related flags associated with socket
2498 * Returns 0 on success, errno otherwise
2500 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2503 struct sock *new_sk, *sk = sock->sk;
2504 struct sk_buff *buf;
2505 struct tipc_sock *new_tsock;
2506 struct tipc_msg *msg;
2512 if (sk->sk_state != TIPC_LISTEN) {
2516 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2517 res = tipc_wait_for_accept(sock, timeo);
2521 buf = skb_peek(&sk->sk_receive_queue);
2523 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2526 security_sk_clone(sock->sk, new_sock->sk);
2528 new_sk = new_sock->sk;
2529 new_tsock = tipc_sk(new_sk);
2532 /* we lock on new_sk; but lockdep sees the lock on sk */
2533 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2536 * Reject any stray messages received by new socket
2537 * before the socket lock was taken (very, very unlikely)
2539 tsk_rej_rx_queue(new_sk);
2541 /* Connect new socket to its peer */
2542 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2544 tsk_set_importance(new_tsock, msg_importance(msg));
2545 if (msg_named(msg)) {
2546 new_tsock->conn_type = msg_nametype(msg);
2547 new_tsock->conn_instance = msg_nameinst(msg);
2551 * Respond to 'SYN-' by discarding it and returning an 'ACK'.
2552 * Respond to 'SYN+' by queuing it on the new socket.
2554 if (!msg_data_sz(msg)) {
2555 struct msghdr m = {NULL,};
2557 tsk_advance_rx_queue(sk);
2558 __tipc_sendstream(new_sock, &m, 0);
2560 __skb_dequeue(&sk->sk_receive_queue);
2561 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2562 skb_set_owner_r(buf, new_sk);
2564 release_sock(new_sk);
2571 * tipc_shutdown - shutdown socket connection
2572 * @sock: socket structure
2573 * @how: direction to close (must be SHUT_RDWR)
2575 * Terminates connection (if necessary), then purges socket's receive queue.
2577 * Returns 0 on success, errno otherwise
2579 static int tipc_shutdown(struct socket *sock, int how)
2581 struct sock *sk = sock->sk;
2584 if (how != SHUT_RDWR)
2589 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2590 sk->sk_shutdown = SHUTDOWN_MASK;
2592 if (sk->sk_state == TIPC_DISCONNECTING) {
2593 /* Discard any unreceived messages */
2594 __skb_queue_purge(&sk->sk_receive_queue);
2600 /* Wake up anyone sleeping in poll. */
2601 sk->sk_state_change(sk);
2607 static void tipc_sk_timeout(struct timer_list *t)
2609 struct sock *sk = from_timer(sk, t, sk_timer);
2610 struct tipc_sock *tsk = tipc_sk(sk);
2611 u32 peer_port = tsk_peer_port(tsk);
2612 u32 peer_node = tsk_peer_node(tsk);
2613 u32 own_node = tsk_own_node(tsk);
2614 u32 own_port = tsk->portid;
2615 struct net *net = sock_net(sk);
2616 struct sk_buff *skb = NULL;
2619 if (!tipc_sk_connected(sk))
2622 /* Try again later if socket is busy */
2623 if (sock_owned_by_user(sk)) {
2624 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2628 if (tsk->probe_unacked) {
2629 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2630 tipc_node_remove_conn(net, peer_node, peer_port);
2631 sk->sk_state_change(sk);
2634 /* Send new probe */
2635 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2636 peer_node, own_node, peer_port, own_port,
2638 tsk->probe_unacked = true;
2639 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2643 tipc_node_xmit_skb(net, skb, peer_node, own_port);
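/*
 * The timer thus implements a simple two-strike probe scheme: on each
 * expiry the socket either tears the connection down (if the previous
 * CONN_PROBE was never acknowledged) or emits a new probe, marks it
 * unacknowledged and rearms the timer for another CONN_PROBING_INTV;
 * a busy (user-owned) socket just retries after a short HZ/20 delay.
 */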
2647 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2648 struct tipc_name_seq const *seq)
2650 struct sock *sk = &tsk->sk;
2651 struct net *net = sock_net(sk);
2652 struct publication *publ;
2655 if (scope != TIPC_NODE_SCOPE)
2656 scope = TIPC_CLUSTER_SCOPE;
2658 if (tipc_sk_connected(sk))
2660 key = tsk->portid + tsk->pub_count + 1;
2661 if (key == tsk->portid)
2664 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2665 scope, tsk->portid, key);
2666 if (unlikely(!publ))
2669 list_add(&publ->binding_sock, &tsk->publications);
2675 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2676 struct tipc_name_seq const *seq)
2678 struct net *net = sock_net(&tsk->sk);
2679 struct publication *publ;
2680 struct publication *safe;
2683 if (scope != TIPC_NODE_SCOPE)
2684 scope = TIPC_CLUSTER_SCOPE;
2686 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2688 if (publ->scope != scope)
2690 if (publ->type != seq->type)
2692 if (publ->lower != seq->lower)
2694 if (publ->upper != seq->upper)
2696 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2697 publ->upper, publ->key);
2701 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2702 publ->upper, publ->key);
2705 if (list_empty(&tsk->publications))
2710 /* tipc_sk_reinit: set non-zero address in all existing sockets
2711 * when we go from standalone to network mode.
2713 void tipc_sk_reinit(struct net *net)
2715 struct tipc_net *tn = net_generic(net, tipc_net_id);
2716 struct rhashtable_iter iter;
2717 struct tipc_sock *tsk;
2718 struct tipc_msg *msg;
2720 rhashtable_walk_enter(&tn->sk_rht, &iter);
2723 rhashtable_walk_start(&iter);
2725 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2726 sock_hold(&tsk->sk);
2727 rhashtable_walk_stop(&iter);
2728 lock_sock(&tsk->sk);
2730 msg_set_prevnode(msg, tipc_own_addr(net));
2731 msg_set_orignode(msg, tipc_own_addr(net));
2732 release_sock(&tsk->sk);
2733 rhashtable_walk_start(&iter);
2737 rhashtable_walk_stop(&iter);
2738 } while (tsk == ERR_PTR(-EAGAIN));
2740 rhashtable_walk_exit(&iter);
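/*
 * Note the stop/restart dance around lock_sock() above: the rhashtable
 * walk runs under RCU and must not sleep, so each socket is pinned with
 * sock_hold(), the walk is paused, the (sleeping) socket lock is taken
 * to patch the preformatted header, and the walk is resumed afterwards.
 * An -EAGAIN from the walker simply restarts the whole pass.
 */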
2743 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2745 struct tipc_net *tn = net_generic(net, tipc_net_id);
2746 struct tipc_sock *tsk;
2749 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2751 sock_hold(&tsk->sk);
2757 static int tipc_sk_insert(struct tipc_sock *tsk)
2759 struct sock *sk = &tsk->sk;
2760 struct net *net = sock_net(sk);
2761 struct tipc_net *tn = net_generic(net, tipc_net_id);
2762 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2763 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2765 while (remaining--) {
2767 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2768 portid = TIPC_MIN_PORT;
2769 tsk->portid = portid;
2770 sock_hold(&tsk->sk);
2771 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2780 static void tipc_sk_remove(struct tipc_sock *tsk)
2782 struct sock *sk = &tsk->sk;
2783 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2785 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2786 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2791 static const struct rhashtable_params tsk_rht_params = {
2793 .head_offset = offsetof(struct tipc_sock, node),
2794 .key_offset = offsetof(struct tipc_sock, portid),
2795 .key_len = sizeof(u32), /* portid */
2796 .max_size = 1048576,
2798 .automatic_shrinking = true,
2801 int tipc_sk_rht_init(struct net *net)
2803 struct tipc_net *tn = net_generic(net, tipc_net_id);
2805 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2808 void tipc_sk_rht_destroy(struct net *net)
2810 struct tipc_net *tn = net_generic(net, tipc_net_id);
2812 /* Wait for socket readers to complete */
2815 rhashtable_destroy(&tn->sk_rht);
2818 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2820 struct net *net = sock_net(&tsk->sk);
2821 struct tipc_group *grp = tsk->group;
2822 struct tipc_msg *hdr = &tsk->phdr;
2823 struct tipc_name_seq seq;
2826 if (mreq->type < TIPC_RESERVED_TYPES)
2828 if (mreq->scope > TIPC_NODE_SCOPE)
2832 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2836 msg_set_lookup_scope(hdr, mreq->scope);
2837 msg_set_nametype(hdr, mreq->type);
2838 msg_set_dest_droppable(hdr, true);
2839 seq.type = mreq->type;
2840 seq.lower = mreq->instance;
2841 seq.upper = seq.lower;
2842 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2843 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2845 tipc_group_delete(net, grp);
2849 /* Eliminate any risk that a broadcast overtakes sent JOINs */
2850 tsk->mc_method.rcast = true;
2851 tsk->mc_method.mandatory = true;
2852 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
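/*
 * From userspace this path is reached through setsockopt(); a minimal
 * sketch, with the service type and instance values (4711/0) chosen
 * purely as placeholders:
 *
 *	struct tipc_group_req req = {
 *		.type     = 4711,
 *		.instance = 0,
 *		.scope    = TIPC_CLUSTER_SCOPE,
 *		.flags    = TIPC_GROUP_MEMBER_EVTS,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *
 * Types below TIPC_RESERVED_TYPES are rejected, and joining also
 * publishes the member's own instance so other members can see it.
 */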
2856 static int tipc_sk_leave(struct tipc_sock *tsk)
2858 struct net *net = sock_net(&tsk->sk);
2859 struct tipc_group *grp = tsk->group;
2860 struct tipc_name_seq seq;
2865 tipc_group_self(grp, &seq, &scope);
2866 tipc_group_delete(net, grp);
2868 tipc_sk_withdraw(tsk, scope, &seq);
2873 * tipc_setsockopt - set socket option
2874 * @sock: socket structure
2875 * @lvl: option level
2876 * @opt: option identifier
2877 * @ov: pointer to new option value
2878 * @ol: length of option value
2880 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2881 * (to ease compatibility).
2883 * Returns 0 on success, errno otherwise
2885 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2886 char __user *ov, unsigned int ol)
2888 struct sock *sk = sock->sk;
2889 struct tipc_sock *tsk = tipc_sk(sk);
2890 struct tipc_group_req mreq;
2894 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2896 if (lvl != SOL_TIPC)
2897 return -ENOPROTOOPT;
2900 case TIPC_IMPORTANCE:
2901 case TIPC_SRC_DROPPABLE:
2902 case TIPC_DEST_DROPPABLE:
2903 case TIPC_CONN_TIMEOUT:
2904 if (ol < sizeof(value))
2906 if (get_user(value, (u32 __user *)ov))
2909 case TIPC_GROUP_JOIN:
2910 if (ol < sizeof(mreq))
2912 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2923 case TIPC_IMPORTANCE:
2924 res = tsk_set_importance(tsk, value);
2926 case TIPC_SRC_DROPPABLE:
2927 if (sock->type != SOCK_STREAM)
2928 tsk_set_unreliable(tsk, value);
2932 case TIPC_DEST_DROPPABLE:
2933 tsk_set_unreturnable(tsk, value);
2935 case TIPC_CONN_TIMEOUT:
2936 tipc_sk(sk)->conn_timeout = value;
2938 case TIPC_MCAST_BROADCAST:
2939 tsk->mc_method.rcast = false;
2940 tsk->mc_method.mandatory = true;
2942 case TIPC_MCAST_REPLICAST:
2943 tsk->mc_method.rcast = true;
2944 tsk->mc_method.mandatory = true;
2946 case TIPC_GROUP_JOIN:
2947 res = tipc_sk_join(tsk, &mreq);
2949 case TIPC_GROUP_LEAVE:
2950 res = tipc_sk_leave(tsk);
2962 * tipc_getsockopt - get socket option
2963 * @sock: socket structure
2964 * @lvl: option level
2965 * @opt: option identifier
2966 * @ov: receptacle for option value
2967 * @ol: receptacle for length of option value
2969 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2970 * (to ease compatibility).
2972 * Returns 0 on success, errno otherwise
2974 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2975 char __user *ov, int __user *ol)
2977 struct sock *sk = sock->sk;
2978 struct tipc_sock *tsk = tipc_sk(sk);
2979 struct tipc_name_seq seq;
2984 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2985 return put_user(0, ol);
2986 if (lvl != SOL_TIPC)
2987 return -ENOPROTOOPT;
2988 res = get_user(len, ol);
2995 case TIPC_IMPORTANCE:
2996 value = tsk_importance(tsk);
2998 case TIPC_SRC_DROPPABLE:
2999 value = tsk_unreliable(tsk);
3001 case TIPC_DEST_DROPPABLE:
3002 value = tsk_unreturnable(tsk);
3004 case TIPC_CONN_TIMEOUT:
3005 value = tsk->conn_timeout;
3006 /* no need to set "res", since already 0 at this point */
3008 case TIPC_NODE_RECVQ_DEPTH:
3009 value = 0; /* was tipc_queue_size, now obsolete */
3011 case TIPC_SOCK_RECVQ_DEPTH:
3012 value = skb_queue_len(&sk->sk_receive_queue);
3014 case TIPC_GROUP_JOIN:
3017 tipc_group_self(tsk->group, &seq, &scope);
3027 return res; /* "get" failed */
3029 if (len < sizeof(value))
3032 if (copy_to_user(ov, &value, sizeof(value)))
3035 return put_user(sizeof(value), ol);
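/*
 * A corresponding read-side sketch, e.g. to inspect the connect
 * timeout (the value is returned in milliseconds, as stored above):
 *
 *	__u32 tmo;
 *	socklen_t len = sizeof(tmo);
 *
 *	getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, &len);
 *
 * The same pattern works for the other u32-valued options above;
 * TIPC_GROUP_JOIN reports the joined group's service type.
 */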
3038 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3040 struct net *net = sock_net(sock->sk);
3041 struct tipc_sioc_nodeid_req nr = {0};
3042 struct tipc_sioc_ln_req lnr;
3043 void __user *argp = (void __user *)arg;
3046 case SIOCGETLINKNAME:
3047 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3049 if (!tipc_node_get_linkname(net,
3050 lnr.bearer_id & 0xffff, lnr.peer,
3051 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3052 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3056 return -EADDRNOTAVAIL;
3058 if (copy_from_user(&nr, argp, sizeof(nr)))
3060 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3061 return -EADDRNOTAVAIL;
3062 if (copy_to_user(argp, &nr, sizeof(nr)))
3066 return -ENOIOCTLCMD;
3070 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3072 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3073 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3074 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3076 tsk1->peer.family = AF_TIPC;
3077 tsk1->peer.addrtype = TIPC_ADDR_ID;
3078 tsk1->peer.scope = TIPC_NODE_SCOPE;
3079 tsk1->peer.addr.id.ref = tsk2->portid;
3080 tsk1->peer.addr.id.node = onode;
3081 tsk2->peer.family = AF_TIPC;
3082 tsk2->peer.addrtype = TIPC_ADDR_ID;
3083 tsk2->peer.scope = TIPC_NODE_SCOPE;
3084 tsk2->peer.addr.id.ref = tsk1->portid;
3085 tsk2->peer.addr.id.node = onode;
3087 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3088 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
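/*
 * This backs the socketpair(2) call for AF_TIPC, e.g.:
 *
 *	int sd[2];
 *
 *	socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sd);
 *
 * Both endpoints are wired up as fully connected peers on the own
 * node, so no bind/connect handshake is needed before sd[0] and
 * sd[1] can exchange messages.
 */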
3092 /* Protocol switches for the various types of TIPC sockets */
3094 static const struct proto_ops msg_ops = {
3095 .owner = THIS_MODULE,
3097 .release = tipc_release,
3099 .connect = tipc_connect,
3100 .socketpair = tipc_socketpair,
3101 .accept = sock_no_accept,
3102 .getname = tipc_getname,
3104 .ioctl = tipc_ioctl,
3105 .listen = sock_no_listen,
3106 .shutdown = tipc_shutdown,
3107 .setsockopt = tipc_setsockopt,
3108 .getsockopt = tipc_getsockopt,
3109 .sendmsg = tipc_sendmsg,
3110 .recvmsg = tipc_recvmsg,
3111 .mmap = sock_no_mmap,
3112 .sendpage = sock_no_sendpage
3115 static const struct proto_ops packet_ops = {
3116 .owner = THIS_MODULE,
3118 .release = tipc_release,
3120 .connect = tipc_connect,
3121 .socketpair = tipc_socketpair,
3122 .accept = tipc_accept,
3123 .getname = tipc_getname,
3125 .ioctl = tipc_ioctl,
3126 .listen = tipc_listen,
3127 .shutdown = tipc_shutdown,
3128 .setsockopt = tipc_setsockopt,
3129 .getsockopt = tipc_getsockopt,
3130 .sendmsg = tipc_send_packet,
3131 .recvmsg = tipc_recvmsg,
3132 .mmap = sock_no_mmap,
3133 .sendpage = sock_no_sendpage
3136 static const struct proto_ops stream_ops = {
3137 .owner = THIS_MODULE,
3139 .release = tipc_release,
3141 .connect = tipc_connect,
3142 .socketpair = tipc_socketpair,
3143 .accept = tipc_accept,
3144 .getname = tipc_getname,
3146 .ioctl = tipc_ioctl,
3147 .listen = tipc_listen,
3148 .shutdown = tipc_shutdown,
3149 .setsockopt = tipc_setsockopt,
3150 .getsockopt = tipc_getsockopt,
3151 .sendmsg = tipc_sendstream,
3152 .recvmsg = tipc_recvstream,
3153 .mmap = sock_no_mmap,
3154 .sendpage = sock_no_sendpage
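/*
 * Assuming the usual mapping in tipc_sk_create(), msg_ops serves the
 * connectionless SOCK_RDM and SOCK_DGRAM sockets (hence sock_no_accept
 * and sock_no_listen), packet_ops serves SOCK_SEQPACKET and stream_ops
 * serves SOCK_STREAM; the latter two differ mainly in using
 * tipc_send_packet versus tipc_sendstream/tipc_recvstream.
 */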
3157 static const struct net_proto_family tipc_family_ops = {
3158 .owner = THIS_MODULE,
3160 .create = tipc_sk_create
3163 static struct proto tipc_proto = {
3165 .owner = THIS_MODULE,
3166 .obj_size = sizeof(struct tipc_sock),
3167 .sysctl_rmem = sysctl_tipc_rmem
3171 * tipc_socket_init - initialize TIPC socket interface
3173 * Returns 0 on success, errno otherwise
3175 int tipc_socket_init(void)
3179 res = proto_register(&tipc_proto, 1);
3181 pr_err("Failed to register TIPC protocol type\n");
3185 res = sock_register(&tipc_family_ops);
3187 pr_err("Failed to register TIPC socket type\n");
3188 proto_unregister(&tipc_proto);
3196 * tipc_socket_stop - stop TIPC socket interface
3198 void tipc_socket_stop(void)
3200 sock_unregister(tipc_family_ops.family);
3201 proto_unregister(&tipc_proto);
3204 /* Caller should hold socket lock for the passed tipc socket. */
3205 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3209 struct nlattr *nest;
3211 peer_node = tsk_peer_node(tsk);
3212 peer_port = tsk_peer_port(tsk);
3214 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3216 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3218 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3221 if (tsk->conn_type != 0) {
3222 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3224 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3226 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3229 nla_nest_end(skb, nest);
3234 nla_nest_cancel(skb, nest);
3239 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3242 struct net *net = sock_net(skb->sk);
3243 struct sock *sk = &tsk->sk;
3245 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3246 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3249 if (tipc_sk_connected(sk)) {
3250 if (__tipc_nl_add_sk_con(skb, tsk))
3252 } else if (!list_empty(&tsk->publications)) {
3253 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3259 /* Caller should hold socket lock for the passed tipc socket. */
3260 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3261 struct tipc_sock *tsk)
3263 struct nlattr *attrs;
3266 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3267 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3271 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3273 goto genlmsg_cancel;
3275 if (__tipc_nl_add_sk_info(skb, tsk))
3276 goto attr_msg_cancel;
3278 nla_nest_end(skb, attrs);
3279 genlmsg_end(skb, hdr);
3284 nla_nest_cancel(skb, attrs);
3286 genlmsg_cancel(skb, hdr);
3291 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3292 int (*skb_handler)(struct sk_buff *skb,
3293 struct netlink_callback *cb,
3294 struct tipc_sock *tsk))
3296 struct rhashtable_iter *iter = (void *)cb->args[4];
3297 struct tipc_sock *tsk;
3300 rhashtable_walk_start(iter);
3301 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3304 if (err == -EAGAIN) {
3311 sock_hold(&tsk->sk);
3312 rhashtable_walk_stop(iter);
3313 lock_sock(&tsk->sk);
3314 err = skb_handler(skb, cb, tsk);
3316 release_sock(&tsk->sk);
3320 release_sock(&tsk->sk);
3321 rhashtable_walk_start(iter);
3324 rhashtable_walk_stop(iter);
3328 EXPORT_SYMBOL(tipc_nl_sk_walk);
3330 int tipc_dump_start(struct netlink_callback *cb)
3332 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3334 EXPORT_SYMBOL(tipc_dump_start);
3336 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3338 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3339 struct rhashtable_iter *iter = (void *)cb->args[4];
3340 struct tipc_net *tn = tipc_net(net);
3343 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3347 cb->args[4] = (long)iter;
3350 rhashtable_walk_enter(&tn->sk_rht, iter);
3354 int tipc_dump_done(struct netlink_callback *cb)
3356 struct rhashtable_iter *hti = (void *)cb->args[4];
3358 rhashtable_walk_exit(hti);
3362 EXPORT_SYMBOL(tipc_dump_done);
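/*
 * The dump helpers above share one rhashtable_iter, parked in
 * cb->args[4] (args[0..3] stay free for the name table dump):
 * __tipc_dump_start() allocates it and enters the walk,
 * tipc_nl_sk_walk() resumes and pauses it around each socket handed
 * to the skb_handler, and tipc_dump_done() exits the walk and frees
 * the iterator once the netlink dump is finished.
 */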
3364 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3365 struct tipc_sock *tsk, u32 sk_filter_state,
3366 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3368 struct sock *sk = &tsk->sk;
3369 struct nlattr *attrs;
3370 struct nlattr *stat;
3372 /* filter response w.r.t. sk_state */
3373 if (!(sk_filter_state & (1 << sk->sk_state)))
3376 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3380 if (__tipc_nl_add_sk_info(skb, tsk))
3381 goto attr_msg_cancel;
3383 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3384 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3385 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3386 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3387 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3389 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3390 tipc_diag_gen_cookie(sk),
3392 goto attr_msg_cancel;
3394 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
3396 goto attr_msg_cancel;
3398 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3399 skb_queue_len(&sk->sk_receive_queue)) ||
3400 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3401 skb_queue_len(&sk->sk_write_queue)) ||
3402 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3403 atomic_read(&sk->sk_drops)))
3404 goto stat_msg_cancel;
3406 if (tsk->cong_link_cnt &&
3407 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3408 goto stat_msg_cancel;
3410 if (tsk_conn_cong(tsk) &&
3411 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3412 goto stat_msg_cancel;
3414 nla_nest_end(skb, stat);
3417 if (tipc_group_fill_sock_diag(tsk->group, skb))
3418 goto stat_msg_cancel;
3420 nla_nest_end(skb, attrs);
3425 nla_nest_cancel(skb, stat);
3427 nla_nest_cancel(skb, attrs);
3431 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3433 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3435 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3438 /* Caller should hold socket lock for the passed tipc socket. */
3439 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3440 struct netlink_callback *cb,
3441 struct publication *publ)
3444 struct nlattr *attrs;
3446 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3447 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3451 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
3453 goto genlmsg_cancel;
3455 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3456 goto attr_msg_cancel;
3457 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3458 goto attr_msg_cancel;
3459 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3460 goto attr_msg_cancel;
3461 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3462 goto attr_msg_cancel;
3464 nla_nest_end(skb, attrs);
3465 genlmsg_end(skb, hdr);
3470 nla_nest_cancel(skb, attrs);
3472 genlmsg_cancel(skb, hdr);
3477 /* Caller should hold socket lock for the passed tipc socket. */
3478 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3479 struct netlink_callback *cb,
3480 struct tipc_sock *tsk, u32 *last_publ)
3483 struct publication *p;
3486 list_for_each_entry(p, &tsk->publications, binding_sock) {
3487 if (p->key == *last_publ)
3490 if (p->key != *last_publ) {
3491 /* We never set seq or call nl_dump_check_consistent(),
3492 * so setting prev_seq here would cause the consistency
3493 * check to fail in the netlink callback handler,
3494 * resulting in the last NLMSG_DONE message having the
3495 * NLM_F_DUMP_INTR flag set.
3502 p = list_first_entry(&tsk->publications, struct publication,
3506 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3507 err = __tipc_nl_add_sk_publ(skb, cb, p);
3509 *last_publ = p->key;
3518 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3521 u32 tsk_portid = cb->args[0];
3522 u32 last_publ = cb->args[1];
3523 u32 done = cb->args[2];
3524 struct net *net = sock_net(skb->sk);
3525 struct tipc_sock *tsk;
3528 struct nlattr **attrs;
3529 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3531 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3535 if (!attrs[TIPC_NLA_SOCK])
3538 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
3539 attrs[TIPC_NLA_SOCK],
3540 tipc_nl_sock_policy, NULL);
3544 if (!sock[TIPC_NLA_SOCK_REF])
3547 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3553 tsk = tipc_sk_lookup(net, tsk_portid);
3557 lock_sock(&tsk->sk);
3558 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3561 release_sock(&tsk->sk);
3564 cb->args[0] = tsk_portid;
3565 cb->args[1] = last_publ;