2 * net/tipc/socket.c: TIPC socket API
4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
41 #include "name_table.h"
44 #include "name_distr.h"
51 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
52 #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
53 #define TIPC_FWD_MSG 1
54 #define TIPC_MAX_PORT 0xffffffff
55 #define TIPC_MIN_PORT 1
56 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of of rcv window size */
59 TIPC_LISTEN = TCP_LISTEN,
60 TIPC_ESTABLISHED = TCP_ESTABLISHED,
61 TIPC_OPEN = TCP_CLOSE,
62 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
63 TIPC_CONNECTING = TCP_SYN_SENT,
66 struct sockaddr_pair {
67 struct sockaddr_tipc sock;
68 struct sockaddr_tipc member;
72 * struct tipc_sock - TIPC socket structure
73 * @sk: socket - interacts with 'port' and with user via the socket API
74 * @conn_type: TIPC type used when connection was established
75 * @conn_instance: TIPC instance used when connection was established
76 * @published: non-zero if port has one or more associated names
77 * @max_pkt: maximum packet size "hint" used when building messages sent by port
78 * @portid: unique port identity in TIPC socket hash table
79 * @phdr: preformatted message header used when sending messages
80 * #cong_links: list of congested links
81 * @publications: list of publications for port
82 * @blocking_link: address of the congested link we are currently sleeping on
83 * @pub_count: total # of publications port has made during its lifetime
84 * @conn_timeout: the time we can wait for an unresponded setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links
87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
89 * @peer: 'connected' peer for dgram/rdm
90 * @node: hash table node
91 * @mc_method: cookie for use between socket and broadcast layer
92 * @rcu: rcu struct for tipc_sock
101 struct tipc_msg phdr;
102 struct list_head cong_links;
103 struct list_head publications;
105 atomic_t dupl_rcvcnt;
114 struct sockaddr_tipc peer;
115 struct rhash_head node;
116 struct tipc_mc_method mc_method;
118 struct tipc_group *group;
122 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
123 static void tipc_data_ready(struct sock *sk);
124 static void tipc_write_space(struct sock *sk);
125 static void tipc_sock_destruct(struct sock *sk);
126 static int tipc_release(struct socket *sock);
127 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
129 static void tipc_sk_timeout(struct timer_list *t);
130 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
131 struct tipc_name_seq const *seq);
132 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
133 struct tipc_name_seq const *seq);
134 static int tipc_sk_leave(struct tipc_sock *tsk);
135 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
136 static int tipc_sk_insert(struct tipc_sock *tsk);
137 static void tipc_sk_remove(struct tipc_sock *tsk);
138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
141 static const struct proto_ops packet_ops;
142 static const struct proto_ops stream_ops;
143 static const struct proto_ops msg_ops;
144 static struct proto tipc_proto;
145 static const struct rhashtable_params tsk_rht_params;
147 static u32 tsk_own_node(struct tipc_sock *tsk)
149 return msg_prevnode(&tsk->phdr);
152 static u32 tsk_peer_node(struct tipc_sock *tsk)
154 return msg_destnode(&tsk->phdr);
157 static u32 tsk_peer_port(struct tipc_sock *tsk)
159 return msg_destport(&tsk->phdr);
162 static bool tsk_unreliable(struct tipc_sock *tsk)
164 return msg_src_droppable(&tsk->phdr) != 0;
167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
172 static bool tsk_unreturnable(struct tipc_sock *tsk)
174 return msg_dest_droppable(&tsk->phdr) != 0;
177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
182 static int tsk_importance(struct tipc_sock *tsk)
184 return msg_importance(&tsk->phdr);
187 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
189 if (imp > TIPC_CRITICAL_IMPORTANCE)
191 msg_set_importance(&tsk->phdr, (u32)imp);
195 static struct tipc_sock *tipc_sk(const struct sock *sk)
197 return container_of(sk, struct tipc_sock, sk);
200 static bool tsk_conn_cong(struct tipc_sock *tsk)
202 return tsk->snt_unacked > tsk->snd_win;
205 static u16 tsk_blocks(int len)
207 return ((len / FLOWCTL_BLK_SZ) + 1);
210 /* tsk_blocks(): translate a buffer size in bytes to number of
211 * advertisable blocks, taking into account the ratio truesize(len)/len
212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
214 static u16 tsk_adv_blocks(int len)
216 return len / FLOWCTL_BLK_SZ / 4;
219 /* tsk_inc(): increment counter for sent or received data
220 * - If block based flow control is not supported by peer we
221 * fall back to message based ditto, incrementing the counter
223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
226 return ((msglen / FLOWCTL_BLK_SZ) + 1);
231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
233 * Caller must hold socket lock
235 static void tsk_advance_rx_queue(struct sock *sk)
237 trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
238 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
241 /* tipc_sk_respond() : send response message back to sender
243 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
247 u32 onode = tipc_own_addr(sock_net(sk));
249 if (!tipc_msg_reverse(onode, &skb, err))
252 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
253 dnode = msg_destnode(buf_msg(skb));
254 selector = msg_origport(buf_msg(skb));
255 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
259 * tsk_rej_rx_queue - reject all buffers in socket receive queue
261 * Caller must hold socket lock
263 static void tsk_rej_rx_queue(struct sock *sk, int error)
267 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
268 tipc_sk_respond(sk, skb, error);
271 static bool tipc_sk_connected(struct sock *sk)
273 return sk->sk_state == TIPC_ESTABLISHED;
276 /* tipc_sk_type_connectionless - check if the socket is datagram socket
279 * Returns true if connection less, false otherwise
281 static bool tipc_sk_type_connectionless(struct sock *sk)
283 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
286 /* tsk_peer_msg - verify if message was sent by connected port's peer
288 * Handles cases where the node's network address has changed from
289 * the default of <0.0.0> to its configured setting.
291 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
293 struct sock *sk = &tsk->sk;
294 u32 self = tipc_own_addr(sock_net(sk));
295 u32 peer_port = tsk_peer_port(tsk);
296 u32 orig_node, peer_node;
298 if (unlikely(!tipc_sk_connected(sk)))
301 if (unlikely(msg_origport(msg) != peer_port))
304 orig_node = msg_orignode(msg);
305 peer_node = tsk_peer_node(tsk);
307 if (likely(orig_node == peer_node))
310 if (!orig_node && peer_node == self)
313 if (!peer_node && orig_node == self)
319 /* tipc_set_sk_state - set the sk_state of the socket
322 * Caller must hold socket lock
324 * Returns 0 on success, errno otherwise
326 static int tipc_set_sk_state(struct sock *sk, int state)
328 int oldsk_state = sk->sk_state;
336 case TIPC_CONNECTING:
337 if (oldsk_state == TIPC_OPEN)
340 case TIPC_ESTABLISHED:
341 if (oldsk_state == TIPC_CONNECTING ||
342 oldsk_state == TIPC_OPEN)
345 case TIPC_DISCONNECTING:
346 if (oldsk_state == TIPC_CONNECTING ||
347 oldsk_state == TIPC_ESTABLISHED)
353 sk->sk_state = state;
358 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
360 struct sock *sk = sock->sk;
361 int err = sock_error(sk);
362 int typ = sock->type;
366 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
367 if (sk->sk_state == TIPC_DISCONNECTING)
369 else if (!tipc_sk_connected(sk))
374 if (signal_pending(current))
375 return sock_intr_errno(*timeout);
380 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
382 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
386 while ((rc_ = !(condition_))) { \
387 /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
390 rc_ = tipc_sk_sock_err((sock_), timeo_); \
393 add_wait_queue(sk_sleep(sk_), &wait_); \
395 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
396 sched_annotate_sleep(); \
398 remove_wait_queue(sk_sleep(sk_), &wait_); \
404 * tipc_sk_create - create a TIPC socket
405 * @net: network namespace (must be default network)
406 * @sock: pre-allocated socket structure
407 * @protocol: protocol indicator (must be 0)
408 * @kern: caused by kernel or by userspace?
410 * This routine creates additional data structures used by the TIPC socket,
411 * initializes them, and links them together.
413 * Returns 0 on success, errno otherwise
415 static int tipc_sk_create(struct net *net, struct socket *sock,
416 int protocol, int kern)
418 const struct proto_ops *ops;
420 struct tipc_sock *tsk;
421 struct tipc_msg *msg;
423 /* Validate arguments */
424 if (unlikely(protocol != 0))
425 return -EPROTONOSUPPORT;
427 switch (sock->type) {
442 /* Allocate socket's protocol area */
443 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
448 tsk->max_pkt = MAX_PKT_DEFAULT;
449 INIT_LIST_HEAD(&tsk->publications);
450 INIT_LIST_HEAD(&tsk->cong_links);
453 /* Finish initializing socket data structures */
455 sock_init_data(sock, sk);
456 tipc_set_sk_state(sk, TIPC_OPEN);
457 if (tipc_sk_insert(tsk)) {
459 pr_warn("Socket create failed; port number exhausted\n");
463 /* Ensure tsk is visible before we read own_addr. */
466 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
467 TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
469 msg_set_origport(msg, tsk->portid);
470 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
472 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
473 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
474 sk->sk_data_ready = tipc_data_ready;
475 sk->sk_write_space = tipc_write_space;
476 sk->sk_destruct = tipc_sock_destruct;
477 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
478 tsk->group_is_open = true;
479 atomic_set(&tsk->dupl_rcvcnt, 0);
481 /* Start out with safe limits until we receive an advertised window */
482 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
483 tsk->rcv_win = tsk->snd_win;
485 if (tipc_sk_type_connectionless(sk)) {
486 tsk_set_unreturnable(tsk, true);
487 if (sock->type == SOCK_DGRAM)
488 tsk_set_unreliable(tsk, true);
490 __skb_queue_head_init(&tsk->mc_method.deferredq);
491 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
495 static void tipc_sk_callback(struct rcu_head *head)
497 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
502 /* Caller should hold socket lock for the socket. */
503 static void __tipc_shutdown(struct socket *sock, int error)
505 struct sock *sk = sock->sk;
506 struct tipc_sock *tsk = tipc_sk(sk);
507 struct net *net = sock_net(sk);
508 long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
509 u32 dnode = tsk_peer_node(tsk);
512 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
513 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
514 !tsk_conn_cong(tsk)));
516 /* Remove any pending SYN message */
517 __skb_queue_purge(&sk->sk_write_queue);
519 /* Remove partially received buffer if any */
520 skb = skb_peek(&sk->sk_receive_queue);
521 if (skb && TIPC_SKB_CB(skb)->bytes_read) {
522 __skb_unlink(skb, &sk->sk_receive_queue);
526 /* Reject all unreceived messages if connectionless */
527 if (tipc_sk_type_connectionless(sk)) {
528 tsk_rej_rx_queue(sk, error);
532 switch (sk->sk_state) {
533 case TIPC_CONNECTING:
534 case TIPC_ESTABLISHED:
535 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
536 tipc_node_remove_conn(net, dnode, tsk->portid);
537 /* Send a FIN+/- to its peer */
538 skb = __skb_dequeue(&sk->sk_receive_queue);
540 __skb_queue_purge(&sk->sk_receive_queue);
541 tipc_sk_respond(sk, skb, error);
544 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
545 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
546 tsk_own_node(tsk), tsk_peer_port(tsk),
549 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
552 /* Reject all SYN messages */
553 tsk_rej_rx_queue(sk, error);
556 __skb_queue_purge(&sk->sk_receive_queue);
562 * tipc_release - destroy a TIPC socket
563 * @sock: socket to destroy
565 * This routine cleans up any messages that are still queued on the socket.
566 * For DGRAM and RDM socket types, all queued messages are rejected.
567 * For SEQPACKET and STREAM socket types, the first message is rejected
568 * and any others are discarded. (If the first message on a STREAM socket
569 * is partially-read, it is discarded and the next one is rejected instead.)
571 * NOTE: Rejected messages are not necessarily returned to the sender! They
572 * are returned or discarded according to the "destination droppable" setting
573 * specified for the message by the sender.
575 * Returns 0 on success, errno otherwise
577 static int tipc_release(struct socket *sock)
579 struct sock *sk = sock->sk;
580 struct tipc_sock *tsk;
583 * Exit if socket isn't fully initialized (occurs when a failed accept()
584 * releases a pre-allocated child socket that was never used)
592 trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
593 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
594 sk->sk_shutdown = SHUTDOWN_MASK;
596 tipc_sk_withdraw(tsk, 0, NULL);
597 __skb_queue_purge(&tsk->mc_method.deferredq);
598 sk_stop_timer(sk, &sk->sk_timer);
602 /* Reject any messages that accumulated in backlog queue */
604 tipc_dest_list_purge(&tsk->cong_links);
605 tsk->cong_link_cnt = 0;
606 call_rcu(&tsk->rcu, tipc_sk_callback);
613 * tipc_bind - associate or disassocate TIPC name(s) with a socket
614 * @sock: socket structure
615 * @uaddr: socket address describing name(s) and desired operation
616 * @uaddr_len: size of socket address data structure
618 * Name and name sequence binding is indicated using a positive scope value;
619 * a negative scope value unbinds the specified name. Specifying no name
620 * (i.e. a socket address length of 0) unbinds all names from the socket.
622 * Returns 0 on success, errno otherwise
624 * NOTE: This routine doesn't need to take the socket lock since it doesn't
625 * access any non-constant socket information.
627 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
630 struct sock *sk = sock->sk;
631 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
632 struct tipc_sock *tsk = tipc_sk(sk);
636 if (unlikely(!uaddr_len)) {
637 res = tipc_sk_withdraw(tsk, 0, NULL);
644 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
648 if (addr->family != AF_TIPC) {
653 if (addr->addrtype == TIPC_ADDR_NAME)
654 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
655 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
660 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
661 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
662 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
667 res = (addr->scope >= 0) ?
668 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
669 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
676 * tipc_getname - get port ID of socket or peer socket
677 * @sock: socket structure
678 * @uaddr: area for returned socket address
679 * @uaddr_len: area for returned length of socket address
680 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
682 * Returns 0 on success, errno otherwise
684 * NOTE: This routine doesn't need to take the socket lock since it only
685 * accesses socket information that is unchanging (or which changes in
686 * a completely predictable manner).
688 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
691 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
692 struct sock *sk = sock->sk;
693 struct tipc_sock *tsk = tipc_sk(sk);
695 memset(addr, 0, sizeof(*addr));
697 if ((!tipc_sk_connected(sk)) &&
698 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
700 addr->addr.id.ref = tsk_peer_port(tsk);
701 addr->addr.id.node = tsk_peer_node(tsk);
703 addr->addr.id.ref = tsk->portid;
704 addr->addr.id.node = tipc_own_addr(sock_net(sk));
707 addr->addrtype = TIPC_ADDR_ID;
708 addr->family = AF_TIPC;
710 addr->addr.name.domain = 0;
712 return sizeof(*addr);
716 * tipc_poll - read and possibly block on pollmask
717 * @file: file structure associated with the socket
718 * @sock: socket for which to calculate the poll bits
721 * Returns pollmask value
724 * It appears that the usual socket locking mechanisms are not useful here
725 * since the pollmask info is potentially out-of-date the moment this routine
726 * exits. TCP and other protocols seem to rely on higher level poll routines
727 * to handle any preventable race conditions, so TIPC will do the same ...
729 * IMPORTANT: The fact that a read or write operation is indicated does NOT
730 * imply that the operation will succeed, merely that it should be performed
731 * and will not block.
733 static __poll_t tipc_poll(struct file *file, struct socket *sock,
736 struct sock *sk = sock->sk;
737 struct tipc_sock *tsk = tipc_sk(sk);
738 __poll_t revents = 0;
740 sock_poll_wait(file, sock, wait);
741 trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
743 if (sk->sk_shutdown & RCV_SHUTDOWN)
744 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
745 if (sk->sk_shutdown == SHUTDOWN_MASK)
748 switch (sk->sk_state) {
749 case TIPC_ESTABLISHED:
750 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
754 case TIPC_CONNECTING:
755 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
756 revents |= EPOLLIN | EPOLLRDNORM;
759 if (tsk->group_is_open && !tsk->cong_link_cnt)
761 if (!tipc_sk_type_connectionless(sk))
763 if (skb_queue_empty_lockless(&sk->sk_receive_queue))
765 revents |= EPOLLIN | EPOLLRDNORM;
767 case TIPC_DISCONNECTING:
768 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
775 * tipc_sendmcast - send multicast message
776 * @sock: socket structure
777 * @seq: destination address
778 * @msg: message to send
779 * @dlen: length of data to send
780 * @timeout: timeout to wait for wakeup
782 * Called from function tipc_sendmsg(), which has done all sanity checks
783 * Returns the number of bytes sent on success, or errno
785 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
786 struct msghdr *msg, size_t dlen, long timeout)
788 struct sock *sk = sock->sk;
789 struct tipc_sock *tsk = tipc_sk(sk);
790 struct tipc_msg *hdr = &tsk->phdr;
791 struct net *net = sock_net(sk);
792 int mtu = tipc_bcast_get_mtu(net);
793 struct tipc_mc_method *method = &tsk->mc_method;
794 struct sk_buff_head pkts;
795 struct tipc_nlist dsts;
801 /* Block or return if any destination link is congested */
802 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
806 /* Lookup destination nodes */
807 tipc_nlist_init(&dsts, tipc_own_addr(net));
808 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
810 if (!dsts.local && !dsts.remote)
811 return -EHOSTUNREACH;
813 /* Build message header */
814 msg_set_type(hdr, TIPC_MCAST_MSG);
815 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
816 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
817 msg_set_destport(hdr, 0);
818 msg_set_destnode(hdr, 0);
819 msg_set_nametype(hdr, seq->type);
820 msg_set_namelower(hdr, seq->lower);
821 msg_set_nameupper(hdr, seq->upper);
823 /* Build message as chain of buffers */
824 __skb_queue_head_init(&pkts);
825 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
827 /* Send message if build was successful */
828 if (unlikely(rc == dlen)) {
829 trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
830 TIPC_DUMP_SK_SNDQ, " ");
831 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
832 &tsk->cong_link_cnt);
835 tipc_nlist_purge(&dsts);
837 return rc ? rc : dlen;
841 * tipc_send_group_msg - send a message to a member in the group
842 * @net: network namespace
843 * @m: message to send
845 * @dnode: destination node
846 * @dport: destination port
847 * @dlen: total length of message data
849 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
850 struct msghdr *m, struct tipc_member *mb,
851 u32 dnode, u32 dport, int dlen)
853 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
854 struct tipc_mc_method *method = &tsk->mc_method;
855 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
856 struct tipc_msg *hdr = &tsk->phdr;
857 struct sk_buff_head pkts;
860 /* Complete message header */
861 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
862 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
863 msg_set_destport(hdr, dport);
864 msg_set_destnode(hdr, dnode);
865 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
867 /* Build message as chain of buffers */
868 __skb_queue_head_init(&pkts);
869 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
870 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
871 if (unlikely(rc != dlen))
875 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
876 if (unlikely(rc == -ELINKCONG)) {
877 tipc_dest_push(&tsk->cong_links, dnode, 0);
878 tsk->cong_link_cnt++;
881 /* Update send window */
882 tipc_group_update_member(mb, blks);
884 /* A broadcast sent within next EXPIRE period must follow same path */
885 method->rcast = true;
886 method->mandatory = true;
891 * tipc_send_group_unicast - send message to a member in the group
892 * @sock: socket structure
893 * @m: message to send
894 * @dlen: total length of message data
895 * @timeout: timeout to wait for wakeup
897 * Called from function tipc_sendmsg(), which has done all sanity checks
898 * Returns the number of bytes sent on success, or errno
900 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
901 int dlen, long timeout)
903 struct sock *sk = sock->sk;
904 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
905 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
906 struct tipc_sock *tsk = tipc_sk(sk);
907 struct net *net = sock_net(sk);
908 struct tipc_member *mb = NULL;
912 node = dest->addr.id.node;
913 port = dest->addr.id.ref;
915 return -EHOSTUNREACH;
917 /* Block or return if destination link or member is congested */
918 rc = tipc_wait_for_cond(sock, &timeout,
919 !tipc_dest_find(&tsk->cong_links, node, 0) &&
921 !tipc_group_cong(tsk->group, node, port, blks,
927 return -EHOSTUNREACH;
929 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
931 return rc ? rc : dlen;
935 * tipc_send_group_anycast - send message to any member with given identity
936 * @sock: socket structure
937 * @m: message to send
938 * @dlen: total length of message data
939 * @timeout: timeout to wait for wakeup
941 * Called from function tipc_sendmsg(), which has done all sanity checks
942 * Returns the number of bytes sent on success, or errno
944 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
945 int dlen, long timeout)
947 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
948 struct sock *sk = sock->sk;
949 struct tipc_sock *tsk = tipc_sk(sk);
950 struct list_head *cong_links = &tsk->cong_links;
951 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
952 struct tipc_msg *hdr = &tsk->phdr;
953 struct tipc_member *first = NULL;
954 struct tipc_member *mbr = NULL;
955 struct net *net = sock_net(sk);
956 u32 node, port, exclude;
957 struct list_head dsts;
958 u32 type, inst, scope;
963 INIT_LIST_HEAD(&dsts);
965 type = msg_nametype(hdr);
966 inst = dest->addr.name.name.instance;
967 scope = msg_lookup_scope(hdr);
969 while (++lookups < 4) {
970 exclude = tipc_group_exclude(tsk->group);
974 /* Look for a non-congested destination member, if any */
976 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
977 &dstcnt, exclude, false))
978 return -EHOSTUNREACH;
979 tipc_dest_pop(&dsts, &node, &port);
980 cong = tipc_group_cong(tsk->group, node, port, blks,
990 /* Start over if destination was not in member list */
994 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
997 /* Block or return if destination link or member is congested */
998 rc = tipc_wait_for_cond(sock, &timeout,
999 !tipc_dest_find(cong_links, node, 0) &&
1001 !tipc_group_cong(tsk->group, node, port,
1006 /* Send, unless destination disappeared while waiting */
1011 if (unlikely(lookups >= 4))
1012 return -EHOSTUNREACH;
1014 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1016 return rc ? rc : dlen;
1020 * tipc_send_group_bcast - send message to all members in communication group
1021 * @sk: socket structure
1022 * @m: message to send
1023 * @dlen: total length of message data
1024 * @timeout: timeout to wait for wakeup
1026 * Called from function tipc_sendmsg(), which has done all sanity checks
1027 * Returns the number of bytes sent on success, or errno
1029 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1030 int dlen, long timeout)
1032 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1033 struct sock *sk = sock->sk;
1034 struct net *net = sock_net(sk);
1035 struct tipc_sock *tsk = tipc_sk(sk);
1036 struct tipc_nlist *dsts;
1037 struct tipc_mc_method *method = &tsk->mc_method;
1038 bool ack = method->mandatory && method->rcast;
1039 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1040 struct tipc_msg *hdr = &tsk->phdr;
1041 int mtu = tipc_bcast_get_mtu(net);
1042 struct sk_buff_head pkts;
1043 int rc = -EHOSTUNREACH;
1045 /* Block or return if any destination link or member is congested */
1046 rc = tipc_wait_for_cond(sock, &timeout,
1047 !tsk->cong_link_cnt && tsk->group &&
1048 !tipc_group_bc_cong(tsk->group, blks));
1052 dsts = tipc_group_dests(tsk->group);
1053 if (!dsts->local && !dsts->remote)
1054 return -EHOSTUNREACH;
1056 /* Complete message header */
1058 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1059 msg_set_nameinst(hdr, dest->addr.name.name.instance);
1061 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1062 msg_set_nameinst(hdr, 0);
1064 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1065 msg_set_destport(hdr, 0);
1066 msg_set_destnode(hdr, 0);
1067 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1069 /* Avoid getting stuck with repeated forced replicasts */
1070 msg_set_grp_bc_ack_req(hdr, ack);
1072 /* Build message as chain of buffers */
1073 __skb_queue_head_init(&pkts);
1074 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1075 if (unlikely(rc != dlen))
1079 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1083 /* Update broadcast sequence number and send windows */
1084 tipc_group_update_bc_members(tsk->group, blks, ack);
1086 /* Broadcast link is now free to choose method for next broadcast */
1087 method->mandatory = false;
1088 method->expires = jiffies;
1094 * tipc_send_group_mcast - send message to all members with given identity
1095 * @sock: socket structure
1096 * @m: message to send
1097 * @dlen: total length of message data
1098 * @timeout: timeout to wait for wakeup
1100 * Called from function tipc_sendmsg(), which has done all sanity checks
1101 * Returns the number of bytes sent on success, or errno
1103 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1104 int dlen, long timeout)
1106 struct sock *sk = sock->sk;
1107 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1108 struct tipc_sock *tsk = tipc_sk(sk);
1109 struct tipc_group *grp = tsk->group;
1110 struct tipc_msg *hdr = &tsk->phdr;
1111 struct net *net = sock_net(sk);
1112 u32 type, inst, scope, exclude;
1113 struct list_head dsts;
1116 INIT_LIST_HEAD(&dsts);
1118 type = msg_nametype(hdr);
1119 inst = dest->addr.name.name.instance;
1120 scope = msg_lookup_scope(hdr);
1121 exclude = tipc_group_exclude(grp);
1123 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1124 &dstcnt, exclude, true))
1125 return -EHOSTUNREACH;
1128 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1129 return tipc_send_group_unicast(sock, m, dlen, timeout);
1132 tipc_dest_list_purge(&dsts);
1133 return tipc_send_group_bcast(sock, m, dlen, timeout);
1137 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1138 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1139 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1141 * Multi-threaded: parallel calls with reference to same queues may occur
1143 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1144 struct sk_buff_head *inputq)
1146 u32 self = tipc_own_addr(net);
1147 u32 type, lower, upper, scope;
1148 struct sk_buff *skb, *_skb;
1150 struct sk_buff_head tmpq;
1151 struct list_head dports;
1152 struct tipc_msg *hdr;
1153 int user, mtyp, hlen;
1156 __skb_queue_head_init(&tmpq);
1157 INIT_LIST_HEAD(&dports);
1159 skb = tipc_skb_peek(arrvq, &inputq->lock);
1160 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1162 user = msg_user(hdr);
1163 mtyp = msg_type(hdr);
1164 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1165 onode = msg_orignode(hdr);
1166 type = msg_nametype(hdr);
1168 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1169 spin_lock_bh(&inputq->lock);
1170 if (skb_peek(arrvq) == skb) {
1171 __skb_dequeue(arrvq);
1172 __skb_queue_tail(inputq, skb);
1175 spin_unlock_bh(&inputq->lock);
1179 /* Group messages require exact scope match */
1180 if (msg_in_group(hdr)) {
1183 scope = msg_lookup_scope(hdr);
1186 /* TIPC_NODE_SCOPE means "any scope" in this context */
1188 scope = TIPC_NODE_SCOPE;
1190 scope = TIPC_CLUSTER_SCOPE;
1192 lower = msg_namelower(hdr);
1193 upper = msg_nameupper(hdr);
1196 /* Create destination port list: */
1197 tipc_nametbl_mc_lookup(net, type, lower, upper,
1198 scope, exact, &dports);
1200 /* Clone message per destination */
1201 while (tipc_dest_pop(&dports, NULL, &portid)) {
1202 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1204 msg_set_destport(buf_msg(_skb), portid);
1205 __skb_queue_tail(&tmpq, _skb);
1208 pr_warn("Failed to clone mcast rcv buffer\n");
1210 /* Append to inputq if not already done by other thread */
1211 spin_lock_bh(&inputq->lock);
1212 if (skb_peek(arrvq) == skb) {
1213 skb_queue_splice_tail_init(&tmpq, inputq);
1214 /* Decrease the skb's refcnt as increasing in the
1215 * function tipc_skb_peek
1217 kfree_skb(__skb_dequeue(arrvq));
1219 spin_unlock_bh(&inputq->lock);
1220 __skb_queue_purge(&tmpq);
1223 tipc_sk_rcv(net, inputq);
1227 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
1228 * @tsk: receiving socket
1229 * @skb: pointer to message buffer.
1231 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1232 struct sk_buff_head *inputq,
1233 struct sk_buff_head *xmitq)
1235 struct tipc_msg *hdr = buf_msg(skb);
1236 u32 onode = tsk_own_node(tsk);
1237 struct sock *sk = &tsk->sk;
1238 int mtyp = msg_type(hdr);
1241 /* Ignore if connection cannot be validated: */
1242 if (!tsk_peer_msg(tsk, hdr)) {
1243 trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
1247 if (unlikely(msg_errcode(hdr))) {
1248 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1249 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1250 tsk_peer_port(tsk));
1251 sk->sk_state_change(sk);
1253 /* State change is ignored if socket already awake,
1254 * - convert msg to abort msg and add to inqueue
1256 msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1257 msg_set_type(hdr, TIPC_CONN_MSG);
1258 msg_set_size(hdr, BASIC_H_SIZE);
1259 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1260 __skb_queue_tail(inputq, skb);
1264 tsk->probe_unacked = false;
1266 if (mtyp == CONN_PROBE) {
1267 msg_set_type(hdr, CONN_PROBE_REPLY);
1268 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1269 __skb_queue_tail(xmitq, skb);
1271 } else if (mtyp == CONN_ACK) {
1272 conn_cong = tsk_conn_cong(tsk);
1273 tsk->snt_unacked -= msg_conn_ack(hdr);
1274 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1275 tsk->snd_win = msg_adv_win(hdr);
1277 sk->sk_write_space(sk);
1278 } else if (mtyp != CONN_PROBE_REPLY) {
1279 pr_warn("Received unknown CONN_PROTO msg\n");
1286 * tipc_sendmsg - send message in connectionless manner
1287 * @sock: socket structure
1288 * @m: message to send
1289 * @dsz: amount of user data to be sent
1291 * Message must have an destination specified explicitly.
1292 * Used for SOCK_RDM and SOCK_DGRAM messages,
1293 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1294 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1296 * Returns the number of bytes sent on success, or errno otherwise
1298 static int tipc_sendmsg(struct socket *sock,
1299 struct msghdr *m, size_t dsz)
1301 struct sock *sk = sock->sk;
1305 ret = __tipc_sendmsg(sock, m, dsz);
1311 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1313 struct sock *sk = sock->sk;
1314 struct net *net = sock_net(sk);
1315 struct tipc_sock *tsk = tipc_sk(sk);
1316 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1317 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1318 struct list_head *clinks = &tsk->cong_links;
1319 bool syn = !tipc_sk_type_connectionless(sk);
1320 struct tipc_group *grp = tsk->group;
1321 struct tipc_msg *hdr = &tsk->phdr;
1322 struct tipc_name_seq *seq;
1323 struct sk_buff_head pkts;
1324 u32 dport = 0, dnode = 0;
1325 u32 type = 0, inst = 0;
1328 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1332 if (unlikely(m->msg_namelen < sizeof(*dest)))
1334 if (unlikely(dest->family != AF_TIPC))
1340 return tipc_send_group_bcast(sock, m, dlen, timeout);
1341 if (dest->addrtype == TIPC_ADDR_NAME)
1342 return tipc_send_group_anycast(sock, m, dlen, timeout);
1343 if (dest->addrtype == TIPC_ADDR_ID)
1344 return tipc_send_group_unicast(sock, m, dlen, timeout);
1345 if (dest->addrtype == TIPC_ADDR_MCAST)
1346 return tipc_send_group_mcast(sock, m, dlen, timeout);
1350 if (unlikely(!dest)) {
1352 if (!syn && dest->family != AF_TIPC)
1353 return -EDESTADDRREQ;
1356 if (unlikely(syn)) {
1357 if (sk->sk_state == TIPC_LISTEN)
1359 if (sk->sk_state != TIPC_OPEN)
1363 if (dest->addrtype == TIPC_ADDR_NAME) {
1364 tsk->conn_type = dest->addr.name.name.type;
1365 tsk->conn_instance = dest->addr.name.name.instance;
1367 msg_set_syn(hdr, 1);
1370 seq = &dest->addr.nameseq;
1371 if (dest->addrtype == TIPC_ADDR_MCAST)
1372 return tipc_sendmcast(sock, seq, m, dlen, timeout);
1374 if (dest->addrtype == TIPC_ADDR_NAME) {
1375 type = dest->addr.name.name.type;
1376 inst = dest->addr.name.name.instance;
1377 dnode = dest->addr.name.domain;
1378 dport = tipc_nametbl_translate(net, type, inst, &dnode);
1379 if (unlikely(!dport && !dnode))
1380 return -EHOSTUNREACH;
1381 } else if (dest->addrtype == TIPC_ADDR_ID) {
1382 dnode = dest->addr.id.node;
1387 /* Block or return if destination link is congested */
1388 rc = tipc_wait_for_cond(sock, &timeout,
1389 !tipc_dest_find(clinks, dnode, 0));
1393 if (dest->addrtype == TIPC_ADDR_NAME) {
1394 msg_set_type(hdr, TIPC_NAMED_MSG);
1395 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1396 msg_set_nametype(hdr, type);
1397 msg_set_nameinst(hdr, inst);
1398 msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1399 msg_set_destnode(hdr, dnode);
1400 msg_set_destport(hdr, dport);
1401 } else { /* TIPC_ADDR_ID */
1402 msg_set_type(hdr, TIPC_DIRECT_MSG);
1403 msg_set_lookup_scope(hdr, 0);
1404 msg_set_destnode(hdr, dnode);
1405 msg_set_destport(hdr, dest->addr.id.ref);
1406 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1409 __skb_queue_head_init(&pkts);
1410 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true);
1411 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1412 if (unlikely(rc != dlen))
1414 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
1415 __skb_queue_purge(&pkts);
1419 trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
1420 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1421 if (unlikely(rc == -ELINKCONG)) {
1422 tipc_dest_push(clinks, dnode, 0);
1423 tsk->cong_link_cnt++;
1427 if (unlikely(syn && !rc))
1428 tipc_set_sk_state(sk, TIPC_CONNECTING);
1430 return rc ? rc : dlen;
1434 * tipc_sendstream - send stream-oriented data
1435 * @sock: socket structure
1437 * @dsz: total length of data to be transmitted
1439 * Used for SOCK_STREAM data.
1441 * Returns the number of bytes sent on success (or partial success),
1442 * or errno if no data sent
1444 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1446 struct sock *sk = sock->sk;
1450 ret = __tipc_sendstream(sock, m, dsz);
1456 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1458 struct sock *sk = sock->sk;
1459 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1460 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1461 struct tipc_sock *tsk = tipc_sk(sk);
1462 struct tipc_msg *hdr = &tsk->phdr;
1463 struct net *net = sock_net(sk);
1464 struct sk_buff_head pkts;
1465 u32 dnode = tsk_peer_node(tsk);
1469 __skb_queue_head_init(&pkts);
1471 if (unlikely(dlen > INT_MAX))
1474 /* Handle implicit connection setup */
1475 if (unlikely(dest)) {
1476 rc = __tipc_sendmsg(sock, m, dlen);
1477 if (dlen && dlen == rc) {
1478 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1479 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1485 rc = tipc_wait_for_cond(sock, &timeout,
1486 (!tsk->cong_link_cnt &&
1487 !tsk_conn_cong(tsk) &&
1488 tipc_sk_connected(sk)));
1492 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1493 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1494 if (unlikely(rc != send))
1497 trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
1498 TIPC_DUMP_SK_SNDQ, " ");
1499 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1500 if (unlikely(rc == -ELINKCONG)) {
1501 tsk->cong_link_cnt = 1;
1505 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1508 } while (sent < dlen && !rc);
1510 return sent ? sent : rc;
1514 * tipc_send_packet - send a connection-oriented message
1515 * @sock: socket structure
1516 * @m: message to send
1517 * @dsz: length of data to be transmitted
1519 * Used for SOCK_SEQPACKET messages.
1521 * Returns the number of bytes sent on success, or errno otherwise
1523 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1525 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1528 return tipc_sendstream(sock, m, dsz);
1531 /* tipc_sk_finish_conn - complete the setup of a connection
1533 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1536 struct sock *sk = &tsk->sk;
1537 struct net *net = sock_net(sk);
1538 struct tipc_msg *msg = &tsk->phdr;
1540 msg_set_syn(msg, 0);
1541 msg_set_destnode(msg, peer_node);
1542 msg_set_destport(msg, peer_port);
1543 msg_set_type(msg, TIPC_CONN_MSG);
1544 msg_set_lookup_scope(msg, 0);
1545 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1547 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1548 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1549 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1550 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
1551 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1552 __skb_queue_purge(&sk->sk_write_queue);
1553 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1556 /* Fall back to message based flow control */
1557 tsk->rcv_win = FLOWCTL_MSG_WIN;
1558 tsk->snd_win = FLOWCTL_MSG_WIN;
1562 * tipc_sk_set_orig_addr - capture sender's address for received message
1563 * @m: descriptor for message info
1564 * @hdr: received message header
1566 * Note: Address is not captured if not requested by receiver.
1568 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1570 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1571 struct tipc_msg *hdr = buf_msg(skb);
1576 srcaddr->sock.family = AF_TIPC;
1577 srcaddr->sock.addrtype = TIPC_ADDR_ID;
1578 srcaddr->sock.scope = 0;
1579 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1580 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1581 srcaddr->sock.addr.name.domain = 0;
1582 m->msg_namelen = sizeof(struct sockaddr_tipc);
1584 if (!msg_in_group(hdr))
1587 /* Group message users may also want to know sending member's id */
1588 srcaddr->member.family = AF_TIPC;
1589 srcaddr->member.addrtype = TIPC_ADDR_NAME;
1590 srcaddr->member.scope = 0;
1591 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1592 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1593 srcaddr->member.addr.name.domain = 0;
1594 m->msg_namelen = sizeof(*srcaddr);
1598 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1599 * @m: descriptor for message info
1600 * @skb: received message buffer
1601 * @tsk: TIPC port associated with message
1603 * Note: Ancillary data is not captured if not requested by receiver.
1605 * Returns 0 if successful, otherwise errno
1607 static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1608 struct tipc_sock *tsk)
1610 struct tipc_msg *msg;
1617 if (likely(m->msg_controllen == 0))
1621 /* Optionally capture errored message object(s) */
1622 err = msg ? msg_errcode(msg) : 0;
1623 if (unlikely(err)) {
1625 anc_data[1] = msg_data_sz(msg);
1626 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1630 if (skb_linearize(skb))
1633 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1640 /* Optionally capture message destination object */
1641 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1642 switch (dest_type) {
1643 case TIPC_NAMED_MSG:
1645 anc_data[0] = msg_nametype(msg);
1646 anc_data[1] = msg_namelower(msg);
1647 anc_data[2] = msg_namelower(msg);
1649 case TIPC_MCAST_MSG:
1651 anc_data[0] = msg_nametype(msg);
1652 anc_data[1] = msg_namelower(msg);
1653 anc_data[2] = msg_nameupper(msg);
1656 has_name = (tsk->conn_type != 0);
1657 anc_data[0] = tsk->conn_type;
1658 anc_data[1] = tsk->conn_instance;
1659 anc_data[2] = tsk->conn_instance;
1665 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1673 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1675 struct sock *sk = &tsk->sk;
1676 struct net *net = sock_net(sk);
1677 struct sk_buff *skb = NULL;
1678 struct tipc_msg *msg;
1679 u32 peer_port = tsk_peer_port(tsk);
1680 u32 dnode = tsk_peer_node(tsk);
1682 if (!tipc_sk_connected(sk))
1684 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1685 dnode, tsk_own_node(tsk), peer_port,
1686 tsk->portid, TIPC_OK);
1690 msg_set_conn_ack(msg, tsk->rcv_unacked);
1691 tsk->rcv_unacked = 0;
1693 /* Adjust to and advertize the correct window limit */
1694 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1695 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1696 msg_set_adv_win(msg, tsk->rcv_win);
1698 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1701 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1703 struct sock *sk = sock->sk;
1704 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1705 long timeo = *timeop;
1706 int err = sock_error(sk);
1712 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1713 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1717 add_wait_queue(sk_sleep(sk), &wait);
1719 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1720 sched_annotate_sleep();
1722 remove_wait_queue(sk_sleep(sk), &wait);
1725 if (!skb_queue_empty(&sk->sk_receive_queue))
1730 err = sock_intr_errno(timeo);
1731 if (signal_pending(current))
1734 err = sock_error(sk);
1743 * tipc_recvmsg - receive packet-oriented message
1744 * @m: descriptor for message info
1745 * @buflen: length of user buffer area
1746 * @flags: receive flags
1748 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1749 * If the complete message doesn't fit in user area, truncate it.
1751 * Returns size of returned message data, errno otherwise
1753 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1754 size_t buflen, int flags)
1756 struct sock *sk = sock->sk;
1757 bool connected = !tipc_sk_type_connectionless(sk);
1758 struct tipc_sock *tsk = tipc_sk(sk);
1759 int rc, err, hlen, dlen, copy;
1760 struct tipc_skb_cb *skb_cb;
1761 struct sk_buff_head xmitq;
1762 struct tipc_msg *hdr;
1763 struct sk_buff *skb;
1767 /* Catch invalid receive requests */
1768 if (unlikely(!buflen))
1772 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1776 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1778 /* Step rcv queue to first msg with data or error; wait if necessary */
1780 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1783 skb = skb_peek(&sk->sk_receive_queue);
1784 skb_cb = TIPC_SKB_CB(skb);
1786 dlen = msg_data_sz(hdr);
1787 hlen = msg_hdr_sz(hdr);
1788 err = msg_errcode(hdr);
1789 grp_evt = msg_is_grp_evt(hdr);
1790 if (likely(dlen || err))
1792 tsk_advance_rx_queue(sk);
1795 /* Collect msg meta data, including error code and rejected data */
1796 tipc_sk_set_orig_addr(m, skb);
1797 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1802 /* Capture data if non-error msg, otherwise just set return value */
1804 int offset = skb_cb->bytes_read;
1806 copy = min_t(int, dlen - offset, buflen);
1807 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1810 if (unlikely(offset + copy < dlen)) {
1811 if (flags & MSG_EOR) {
1812 if (!(flags & MSG_PEEK))
1813 skb_cb->bytes_read = offset + copy;
1815 m->msg_flags |= MSG_TRUNC;
1816 skb_cb->bytes_read = 0;
1819 if (flags & MSG_EOR)
1820 m->msg_flags |= MSG_EOR;
1821 skb_cb->bytes_read = 0;
1826 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
1832 /* Mark message as group event if applicable */
1833 if (unlikely(grp_evt)) {
1834 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1835 m->msg_flags |= MSG_EOR;
1836 m->msg_flags |= MSG_OOB;
1840 /* Caption of data or error code/rejected data was successful */
1841 if (unlikely(flags & MSG_PEEK))
1844 /* Send group flow control advertisement when applicable */
1845 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1846 __skb_queue_head_init(&xmitq);
1847 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1848 msg_orignode(hdr), msg_origport(hdr),
1850 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1853 if (skb_cb->bytes_read)
1856 tsk_advance_rx_queue(sk);
1858 if (likely(!connected))
1861 /* Send connection flow control advertisement when applicable */
1862 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1863 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1864 tipc_sk_send_ack(tsk);
1867 return rc ? rc : copy;
1871 * tipc_recvstream - receive stream-oriented data
1872 * @m: descriptor for message info
1873 * @buflen: total size of user buffer area
1874 * @flags: receive flags
1876 * Used for SOCK_STREAM messages only. If not enough data is available
1877 * will optionally wait for more; never truncates data.
1879 * Returns size of returned message data, errno otherwise
1881 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1882 size_t buflen, int flags)
1884 struct sock *sk = sock->sk;
1885 struct tipc_sock *tsk = tipc_sk(sk);
1886 struct sk_buff *skb;
1887 struct tipc_msg *hdr;
1888 struct tipc_skb_cb *skb_cb;
1889 bool peek = flags & MSG_PEEK;
1890 int offset, required, copy, copied = 0;
1891 int hlen, dlen, err, rc;
1894 /* Catch invalid receive attempts */
1895 if (unlikely(!buflen))
1900 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1904 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1905 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1908 /* Look at first msg in receive queue; wait if necessary */
1909 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1912 skb = skb_peek(&sk->sk_receive_queue);
1913 skb_cb = TIPC_SKB_CB(skb);
1915 dlen = msg_data_sz(hdr);
1916 hlen = msg_hdr_sz(hdr);
1917 err = msg_errcode(hdr);
1919 /* Discard any empty non-errored (SYN-) message */
1920 if (unlikely(!dlen && !err)) {
1921 tsk_advance_rx_queue(sk);
1925 /* Collect msg meta data, incl. error code and rejected data */
1927 tipc_sk_set_orig_addr(m, skb);
1928 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1934 /* Copy data if msg ok, otherwise return error/partial data */
1936 offset = skb_cb->bytes_read;
1937 copy = min_t(int, dlen - offset, buflen - copied);
1938 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1943 if (unlikely(offset < dlen)) {
1945 skb_cb->bytes_read = offset;
1950 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1959 tsk_advance_rx_queue(sk);
1961 /* Send connection flow control advertisement when applicable */
1962 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1963 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1964 tipc_sk_send_ack(tsk);
1966 /* Exit if all requested data or FIN/error received */
1967 if (copied == buflen || err)
1970 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1973 return copied ? copied : rc;
1977 * tipc_write_space - wake up thread if port congestion is released
1980 static void tipc_write_space(struct sock *sk)
1982 struct socket_wq *wq;
1985 wq = rcu_dereference(sk->sk_wq);
1986 if (skwq_has_sleeper(wq))
1987 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1988 EPOLLWRNORM | EPOLLWRBAND);
1993 * tipc_data_ready - wake up threads to indicate messages have been received
1995 * @len: the length of messages
1997 static void tipc_data_ready(struct sock *sk)
1999 struct socket_wq *wq;
2002 wq = rcu_dereference(sk->sk_wq);
2003 if (skwq_has_sleeper(wq))
2004 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
2005 EPOLLRDNORM | EPOLLRDBAND);
2009 static void tipc_sock_destruct(struct sock *sk)
2011 __skb_queue_purge(&sk->sk_receive_queue);
2014 static void tipc_sk_proto_rcv(struct sock *sk,
2015 struct sk_buff_head *inputq,
2016 struct sk_buff_head *xmitq)
2018 struct sk_buff *skb = __skb_dequeue(inputq);
2019 struct tipc_sock *tsk = tipc_sk(sk);
2020 struct tipc_msg *hdr = buf_msg(skb);
2021 struct tipc_group *grp = tsk->group;
2022 bool wakeup = false;
2024 switch (msg_user(hdr)) {
2026 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
2029 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2030 /* coupled with smp_rmb() in tipc_wait_for_cond() */
2032 tsk->cong_link_cnt--;
2035 case GROUP_PROTOCOL:
2036 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
2039 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2040 hdr, inputq, xmitq);
2047 sk->sk_write_space(sk);
2053 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2055 * @skb: pointer to message buffer.
2056 * Returns true if message should be added to receive queue, false otherwise
2058 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
2060 struct sock *sk = &tsk->sk;
2061 struct net *net = sock_net(sk);
2062 struct tipc_msg *hdr = buf_msg(skb);
2063 bool con_msg = msg_connected(hdr);
2064 u32 pport = tsk_peer_port(tsk);
2065 u32 pnode = tsk_peer_node(tsk);
2066 u32 oport = msg_origport(hdr);
2067 u32 onode = msg_orignode(hdr);
2068 int err = msg_errcode(hdr);
2069 unsigned long delay;
2071 if (unlikely(msg_mcast(hdr)))
2074 switch (sk->sk_state) {
2075 case TIPC_CONNECTING:
2077 if (likely(con_msg)) {
2080 tipc_sk_finish_conn(tsk, oport, onode);
2081 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2082 /* ACK+ message with data is added to receive queue */
2083 if (msg_data_sz(hdr))
2085 /* Empty ACK-, - wake up sleeping connect() and drop */
2086 sk->sk_state_change(sk);
2087 msg_set_dest_droppable(hdr, 1);
2090 /* Ignore connectionless message if not from listening socket */
2091 if (oport != pport || onode != pnode)
2095 if (err != TIPC_ERR_OVERLOAD)
2098 /* Prepare for new setup attempt if we have a SYN clone */
2099 if (skb_queue_empty(&sk->sk_write_queue))
2101 get_random_bytes(&delay, 2);
2102 delay %= (tsk->conn_timeout / 4);
2103 delay = msecs_to_jiffies(delay + 100);
2104 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2107 case TIPC_DISCONNECTING:
2110 /* Accept only SYN message */
2111 if (!msg_is_syn(hdr) &&
2112 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2114 if (!con_msg && !err)
2117 case TIPC_ESTABLISHED:
2118 /* Accept only connection-based messages sent by peer */
2119 if (likely(con_msg && !err && pport == oport && pnode == onode))
2121 if (!tsk_peer_msg(tsk, hdr))
2125 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2126 tipc_node_remove_conn(net, pnode, tsk->portid);
2127 sk->sk_state_change(sk);
2130 pr_err("Unknown sk_state %u\n", sk->sk_state);
2132 /* Abort connection setup attempt */
2133 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2134 sk->sk_err = ECONNREFUSED;
2135 sk->sk_state_change(sk);
2140 * rcvbuf_limit - get proper overload limit of socket receive queue
2144 * For connection oriented messages, irrespective of importance,
2145 * default queue limit is 2 MB.
2147 * For connectionless messages, queue limits are based on message
2148 * importance as follows:
2150 * TIPC_LOW_IMPORTANCE (2 MB)
2151 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2152 * TIPC_HIGH_IMPORTANCE (8 MB)
2153 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2155 * Returns overload limit according to corresponding message importance
2157 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2159 struct tipc_sock *tsk = tipc_sk(sk);
2160 struct tipc_msg *hdr = buf_msg(skb);
2162 if (unlikely(msg_in_group(hdr)))
2163 return READ_ONCE(sk->sk_rcvbuf);
2165 if (unlikely(!msg_connected(hdr)))
2166 return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
2168 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2169 return READ_ONCE(sk->sk_rcvbuf);
2171 return FLOWCTL_MSG_LIM;
2175 * tipc_sk_filter_rcv - validate incoming message
2177 * @skb: pointer to message.
2179 * Enqueues message on receive queue if acceptable; optionally handles
2180 * disconnect indication for a connected socket.
2182 * Called with socket lock already taken
2185 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2186 struct sk_buff_head *xmitq)
2188 bool sk_conn = !tipc_sk_type_connectionless(sk);
2189 struct tipc_sock *tsk = tipc_sk(sk);
2190 struct tipc_group *grp = tsk->group;
2191 struct tipc_msg *hdr = buf_msg(skb);
2192 struct net *net = sock_net(sk);
2193 struct sk_buff_head inputq;
2194 int mtyp = msg_type(hdr);
2195 int limit, err = TIPC_OK;
2197 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2198 TIPC_SKB_CB(skb)->bytes_read = 0;
2199 __skb_queue_head_init(&inputq);
2200 __skb_queue_tail(&inputq, skb);
2202 if (unlikely(!msg_isdata(hdr)))
2203 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2206 tipc_group_filter_msg(grp, &inputq, xmitq);
2208 if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
2209 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2211 /* Validate and add to receive buffer if there is space */
2212 while ((skb = __skb_dequeue(&inputq))) {
2214 limit = rcvbuf_limit(sk, skb);
2215 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2216 (!sk_conn && msg_connected(hdr)) ||
2217 (!grp && msg_in_group(hdr)))
2218 err = TIPC_ERR_NO_PORT;
2219 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2220 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2222 atomic_inc(&sk->sk_drops);
2223 err = TIPC_ERR_OVERLOAD;
2226 if (unlikely(err)) {
2227 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2228 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2230 __skb_queue_tail(xmitq, skb);
2235 __skb_queue_tail(&sk->sk_receive_queue, skb);
2236 skb_set_owner_r(skb, sk);
2237 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2238 "rcvq >90% allocated!");
2239 sk->sk_data_ready(sk);
2244 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2248 * Caller must hold socket lock
2250 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2252 unsigned int before = sk_rmem_alloc_get(sk);
2253 struct sk_buff_head xmitq;
2256 __skb_queue_head_init(&xmitq);
2258 tipc_sk_filter_rcv(sk, skb, &xmitq);
2259 added = sk_rmem_alloc_get(sk) - before;
2260 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2262 /* Send pending response/rejected messages, if any */
2263 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2268 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2269 * inputq and try adding them to socket or backlog queue
2270 * @inputq: list of incoming buffers with potentially different destinations
2271 * @sk: socket where the buffers should be enqueued
2272 * @dport: port number for the socket
2274 * Caller must hold socket lock
2276 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2277 u32 dport, struct sk_buff_head *xmitq)
2279 unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
2280 struct sk_buff *skb;
2285 while (skb_queue_len(inputq)) {
2286 if (unlikely(time_after_eq(jiffies, time_limit)))
2289 skb = tipc_skb_dequeue(inputq, dport);
2293 /* Add message directly to receive queue if possible */
2294 if (!sock_owned_by_user(sk)) {
2295 tipc_sk_filter_rcv(sk, skb, xmitq);
2299 /* Try backlog, compensating for double-counted bytes */
2300 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2301 if (!sk->sk_backlog.len)
2302 atomic_set(dcnt, 0);
2303 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2304 if (likely(!sk_add_backlog(sk, skb, lim))) {
2305 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2306 "bklg & rcvq >90% allocated!");
2310 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2311 /* Overload => reject message back to sender */
2312 onode = tipc_own_addr(sock_net(sk));
2313 atomic_inc(&sk->sk_drops);
2314 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2315 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2317 __skb_queue_tail(xmitq, skb);
2324 * tipc_sk_rcv - handle a chain of incoming buffers
2325 * @inputq: buffer list containing the buffers
2326 * Consumes all buffers in list until inputq is empty
2327 * Note: may be called in multiple threads referring to the same queue
2329 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2331 struct sk_buff_head xmitq;
2332 u32 dnode, dport = 0;
2334 struct tipc_sock *tsk;
2336 struct sk_buff *skb;
2338 __skb_queue_head_init(&xmitq);
2339 while (skb_queue_len(inputq)) {
2340 dport = tipc_skb_peek_port(inputq, dport);
2341 tsk = tipc_sk_lookup(net, dport);
2345 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2346 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2347 spin_unlock_bh(&sk->sk_lock.slock);
2349 /* Send pending response/rejected messages, if any */
2350 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2354 /* No destination socket => dequeue skb if still there */
2355 skb = tipc_skb_dequeue(inputq, dport);
2359 /* Try secondary lookup if unresolved named message */
2360 err = TIPC_ERR_NO_PORT;
2361 if (tipc_msg_lookup_dest(net, skb, &err))
2364 /* Prepare for message rejection */
2365 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2368 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2370 dnode = msg_destnode(buf_msg(skb));
2371 tipc_node_xmit_skb(net, skb, dnode, dport);
2375 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2377 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2378 struct sock *sk = sock->sk;
2382 int err = sock_error(sk);
2387 if (signal_pending(current))
2388 return sock_intr_errno(*timeo_p);
2390 add_wait_queue(sk_sleep(sk), &wait);
2391 done = sk_wait_event(sk, timeo_p,
2392 sk->sk_state != TIPC_CONNECTING, &wait);
2393 remove_wait_queue(sk_sleep(sk), &wait);
2398 static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2400 if (addr->family != AF_TIPC)
2402 if (addr->addrtype == TIPC_SERVICE_RANGE)
2403 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2404 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2405 addr->addrtype == TIPC_SOCKET_ADDR);
2409 * tipc_connect - establish a connection to another TIPC port
2410 * @sock: socket structure
2411 * @dest: socket address for destination port
2412 * @destlen: size of socket address data structure
2413 * @flags: file-related flags associated with socket
2415 * Returns 0 on success, errno otherwise
2417 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2418 int destlen, int flags)
2420 struct sock *sk = sock->sk;
2421 struct tipc_sock *tsk = tipc_sk(sk);
2422 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2423 struct msghdr m = {NULL,};
2424 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2428 if (destlen != sizeof(struct sockaddr_tipc))
2438 if (dst->family == AF_UNSPEC) {
2439 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2440 if (!tipc_sk_type_connectionless(sk))
2444 if (!tipc_sockaddr_is_sane(dst)) {
2448 /* DGRAM/RDM connect(), just save the destaddr */
2449 if (tipc_sk_type_connectionless(sk)) {
2450 memcpy(&tsk->peer, dest, destlen);
2452 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2457 previous = sk->sk_state;
2459 switch (sk->sk_state) {
2461 /* Send a 'SYN-' to destination */
2463 m.msg_namelen = destlen;
2465 /* If connect is in non-blocking case, set MSG_DONTWAIT to
2466 * indicate send_msg() is never blocked.
2469 m.msg_flags = MSG_DONTWAIT;
2471 res = __tipc_sendmsg(sock, &m, 0);
2472 if ((res < 0) && (res != -EWOULDBLOCK))
2475 /* Just entered TIPC_CONNECTING state; the only
2476 * difference is that return value in non-blocking
2477 * case is EINPROGRESS, rather than EALREADY.
2481 case TIPC_CONNECTING:
2483 if (previous == TIPC_CONNECTING)
2487 timeout = msecs_to_jiffies(timeout);
2488 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2489 res = tipc_wait_for_connect(sock, &timeout);
2491 case TIPC_ESTABLISHED:
2504 * tipc_listen - allow socket to listen for incoming connections
2505 * @sock: socket structure
2508 * Returns 0 on success, errno otherwise
2510 static int tipc_listen(struct socket *sock, int len)
2512 struct sock *sk = sock->sk;
2516 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2522 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2524 struct sock *sk = sock->sk;
2525 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2528 /* True wake-one mechanism for incoming connections: only
2529 * one process gets woken up, not the 'whole herd'.
2530 * Since we do not 'race & poll' for established sockets
2531 * anymore, the common case will execute the loop only once.
2534 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2535 add_wait_queue(sk_sleep(sk), &wait);
2537 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
2539 remove_wait_queue(sk_sleep(sk), &wait);
2542 if (!skb_queue_empty(&sk->sk_receive_queue))
2547 err = sock_intr_errno(timeo);
2548 if (signal_pending(current))
2555 * tipc_accept - wait for connection request
2556 * @sock: listening socket
2557 * @newsock: new socket that is to be connected
2558 * @flags: file-related flags associated with socket
2560 * Returns 0 on success, errno otherwise
2562 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2565 struct sock *new_sk, *sk = sock->sk;
2566 struct sk_buff *buf;
2567 struct tipc_sock *new_tsock;
2568 struct tipc_msg *msg;
2574 if (sk->sk_state != TIPC_LISTEN) {
2578 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2579 res = tipc_wait_for_accept(sock, timeo);
2583 buf = skb_peek(&sk->sk_receive_queue);
2585 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2588 security_sk_clone(sock->sk, new_sock->sk);
2590 new_sk = new_sock->sk;
2591 new_tsock = tipc_sk(new_sk);
2594 /* we lock on new_sk; but lockdep sees the lock on sk */
2595 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2598 * Reject any stray messages received by new socket
2599 * before the socket lock was taken (very, very unlikely)
2601 tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
2603 /* Connect new socket to it's peer */
2604 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2606 tsk_set_importance(new_tsock, msg_importance(msg));
2607 if (msg_named(msg)) {
2608 new_tsock->conn_type = msg_nametype(msg);
2609 new_tsock->conn_instance = msg_nameinst(msg);
2613 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
2614 * Respond to 'SYN+' by queuing it on new socket.
2616 if (!msg_data_sz(msg)) {
2617 struct msghdr m = {NULL,};
2619 tsk_advance_rx_queue(sk);
2620 __tipc_sendstream(new_sock, &m, 0);
2622 __skb_dequeue(&sk->sk_receive_queue);
2623 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2624 skb_set_owner_r(buf, new_sk);
2626 release_sock(new_sk);
2633 * tipc_shutdown - shutdown socket connection
2634 * @sock: socket structure
2635 * @how: direction to close (must be SHUT_RDWR)
2637 * Terminates connection (if necessary), then purges socket's receive queue.
2639 * Returns 0 on success, errno otherwise
2641 static int tipc_shutdown(struct socket *sock, int how)
2643 struct sock *sk = sock->sk;
2646 if (how != SHUT_RDWR)
2651 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2652 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2653 sk->sk_shutdown = SHUTDOWN_MASK;
2655 if (sk->sk_state == TIPC_DISCONNECTING) {
2656 /* Discard any unreceived messages */
2657 __skb_queue_purge(&sk->sk_receive_queue);
2663 /* Wake up anyone sleeping in poll. */
2664 sk->sk_state_change(sk);
2670 static void tipc_sk_check_probing_state(struct sock *sk,
2671 struct sk_buff_head *list)
2673 struct tipc_sock *tsk = tipc_sk(sk);
2674 u32 pnode = tsk_peer_node(tsk);
2675 u32 pport = tsk_peer_port(tsk);
2676 u32 self = tsk_own_node(tsk);
2677 u32 oport = tsk->portid;
2678 struct sk_buff *skb;
2680 if (tsk->probe_unacked) {
2681 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2682 sk->sk_err = ECONNABORTED;
2683 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2684 sk->sk_state_change(sk);
2687 /* Prepare new probe */
2688 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2689 pnode, self, pport, oport, TIPC_OK);
2691 __skb_queue_tail(list, skb);
2692 tsk->probe_unacked = true;
2693 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2696 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2698 struct tipc_sock *tsk = tipc_sk(sk);
2700 /* Try again later if dest link is congested */
2701 if (tsk->cong_link_cnt) {
2702 sk_reset_timer(sk, &sk->sk_timer,
2703 jiffies + msecs_to_jiffies(100));
2706 /* Prepare SYN for retransmit */
2707 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2710 static void tipc_sk_timeout(struct timer_list *t)
2712 struct sock *sk = from_timer(sk, t, sk_timer);
2713 struct tipc_sock *tsk = tipc_sk(sk);
2714 u32 pnode = tsk_peer_node(tsk);
2715 struct sk_buff_head list;
2718 __skb_queue_head_init(&list);
2721 /* Try again later if socket is busy */
2722 if (sock_owned_by_user(sk)) {
2723 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2729 if (sk->sk_state == TIPC_ESTABLISHED)
2730 tipc_sk_check_probing_state(sk, &list);
2731 else if (sk->sk_state == TIPC_CONNECTING)
2732 tipc_sk_retry_connect(sk, &list);
2736 if (!skb_queue_empty(&list))
2737 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2739 /* SYN messages may cause link congestion */
2740 if (rc == -ELINKCONG) {
2741 tipc_dest_push(&tsk->cong_links, pnode, 0);
2742 tsk->cong_link_cnt = 1;
2747 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2748 struct tipc_name_seq const *seq)
2750 struct sock *sk = &tsk->sk;
2751 struct net *net = sock_net(sk);
2752 struct publication *publ;
2755 if (scope != TIPC_NODE_SCOPE)
2756 scope = TIPC_CLUSTER_SCOPE;
2758 if (tipc_sk_connected(sk))
2760 key = tsk->portid + tsk->pub_count + 1;
2761 if (key == tsk->portid)
2764 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2765 scope, tsk->portid, key);
2766 if (unlikely(!publ))
2769 list_add(&publ->binding_sock, &tsk->publications);
2775 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2776 struct tipc_name_seq const *seq)
2778 struct net *net = sock_net(&tsk->sk);
2779 struct publication *publ;
2780 struct publication *safe;
2783 if (scope != TIPC_NODE_SCOPE)
2784 scope = TIPC_CLUSTER_SCOPE;
2786 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2788 if (publ->scope != scope)
2790 if (publ->type != seq->type)
2792 if (publ->lower != seq->lower)
2794 if (publ->upper != seq->upper)
2796 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2797 publ->upper, publ->key);
2801 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2802 publ->upper, publ->key);
2805 if (list_empty(&tsk->publications))
2810 /* tipc_sk_reinit: set non-zero address in all existing sockets
2811 * when we go from standalone to network mode.
2813 void tipc_sk_reinit(struct net *net)
2815 struct tipc_net *tn = net_generic(net, tipc_net_id);
2816 struct rhashtable_iter iter;
2817 struct tipc_sock *tsk;
2818 struct tipc_msg *msg;
2820 rhashtable_walk_enter(&tn->sk_rht, &iter);
2823 rhashtable_walk_start(&iter);
2825 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2826 sock_hold(&tsk->sk);
2827 rhashtable_walk_stop(&iter);
2828 lock_sock(&tsk->sk);
2830 msg_set_prevnode(msg, tipc_own_addr(net));
2831 msg_set_orignode(msg, tipc_own_addr(net));
2832 release_sock(&tsk->sk);
2833 rhashtable_walk_start(&iter);
2837 rhashtable_walk_stop(&iter);
2838 } while (tsk == ERR_PTR(-EAGAIN));
2840 rhashtable_walk_exit(&iter);
2843 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2845 struct tipc_net *tn = net_generic(net, tipc_net_id);
2846 struct tipc_sock *tsk;
2849 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2851 sock_hold(&tsk->sk);
2857 static int tipc_sk_insert(struct tipc_sock *tsk)
2859 struct sock *sk = &tsk->sk;
2860 struct net *net = sock_net(sk);
2861 struct tipc_net *tn = net_generic(net, tipc_net_id);
2862 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2863 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2865 while (remaining--) {
2867 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2868 portid = TIPC_MIN_PORT;
2869 tsk->portid = portid;
2870 sock_hold(&tsk->sk);
2871 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2880 static void tipc_sk_remove(struct tipc_sock *tsk)
2882 struct sock *sk = &tsk->sk;
2883 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2885 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2886 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2891 static const struct rhashtable_params tsk_rht_params = {
2893 .head_offset = offsetof(struct tipc_sock, node),
2894 .key_offset = offsetof(struct tipc_sock, portid),
2895 .key_len = sizeof(u32), /* portid */
2896 .max_size = 1048576,
2898 .automatic_shrinking = true,
2901 int tipc_sk_rht_init(struct net *net)
2903 struct tipc_net *tn = net_generic(net, tipc_net_id);
2905 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2908 void tipc_sk_rht_destroy(struct net *net)
2910 struct tipc_net *tn = net_generic(net, tipc_net_id);
2912 /* Wait for socket readers to complete */
2915 rhashtable_destroy(&tn->sk_rht);
2918 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2920 struct net *net = sock_net(&tsk->sk);
2921 struct tipc_group *grp = tsk->group;
2922 struct tipc_msg *hdr = &tsk->phdr;
2923 struct tipc_name_seq seq;
2926 if (mreq->type < TIPC_RESERVED_TYPES)
2928 if (mreq->scope > TIPC_NODE_SCOPE)
2932 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2936 msg_set_lookup_scope(hdr, mreq->scope);
2937 msg_set_nametype(hdr, mreq->type);
2938 msg_set_dest_droppable(hdr, true);
2939 seq.type = mreq->type;
2940 seq.lower = mreq->instance;
2941 seq.upper = seq.lower;
2942 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2943 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2945 tipc_group_delete(net, grp);
2949 /* Eliminate any risk that a broadcast overtakes sent JOINs */
2950 tsk->mc_method.rcast = true;
2951 tsk->mc_method.mandatory = true;
2952 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2956 static int tipc_sk_leave(struct tipc_sock *tsk)
2958 struct net *net = sock_net(&tsk->sk);
2959 struct tipc_group *grp = tsk->group;
2960 struct tipc_name_seq seq;
2965 tipc_group_self(grp, &seq, &scope);
2966 tipc_group_delete(net, grp);
2968 tipc_sk_withdraw(tsk, scope, &seq);
2973 * tipc_setsockopt - set socket option
2974 * @sock: socket structure
2975 * @lvl: option level
2976 * @opt: option identifier
2977 * @ov: pointer to new option value
2978 * @ol: length of option value
2980 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2981 * (to ease compatibility).
2983 * Returns 0 on success, errno otherwise
2985 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2986 char __user *ov, unsigned int ol)
2988 struct sock *sk = sock->sk;
2989 struct tipc_sock *tsk = tipc_sk(sk);
2990 struct tipc_group_req mreq;
2994 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2996 if (lvl != SOL_TIPC)
2997 return -ENOPROTOOPT;
3000 case TIPC_IMPORTANCE:
3001 case TIPC_SRC_DROPPABLE:
3002 case TIPC_DEST_DROPPABLE:
3003 case TIPC_CONN_TIMEOUT:
3004 if (ol < sizeof(value))
3006 if (get_user(value, (u32 __user *)ov))
3009 case TIPC_GROUP_JOIN:
3010 if (ol < sizeof(mreq))
3012 if (copy_from_user(&mreq, ov, sizeof(mreq)))
3023 case TIPC_IMPORTANCE:
3024 res = tsk_set_importance(tsk, value);
3026 case TIPC_SRC_DROPPABLE:
3027 if (sock->type != SOCK_STREAM)
3028 tsk_set_unreliable(tsk, value);
3032 case TIPC_DEST_DROPPABLE:
3033 tsk_set_unreturnable(tsk, value);
3035 case TIPC_CONN_TIMEOUT:
3036 tipc_sk(sk)->conn_timeout = value;
3038 case TIPC_MCAST_BROADCAST:
3039 tsk->mc_method.rcast = false;
3040 tsk->mc_method.mandatory = true;
3042 case TIPC_MCAST_REPLICAST:
3043 tsk->mc_method.rcast = true;
3044 tsk->mc_method.mandatory = true;
3046 case TIPC_GROUP_JOIN:
3047 res = tipc_sk_join(tsk, &mreq);
3049 case TIPC_GROUP_LEAVE:
3050 res = tipc_sk_leave(tsk);
3062 * tipc_getsockopt - get socket option
3063 * @sock: socket structure
3064 * @lvl: option level
3065 * @opt: option identifier
3066 * @ov: receptacle for option value
3067 * @ol: receptacle for length of option value
3069 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3070 * (to ease compatibility).
3072 * Returns 0 on success, errno otherwise
3074 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3075 char __user *ov, int __user *ol)
3077 struct sock *sk = sock->sk;
3078 struct tipc_sock *tsk = tipc_sk(sk);
3079 struct tipc_name_seq seq;
3084 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3085 return put_user(0, ol);
3086 if (lvl != SOL_TIPC)
3087 return -ENOPROTOOPT;
3088 res = get_user(len, ol);
3095 case TIPC_IMPORTANCE:
3096 value = tsk_importance(tsk);
3098 case TIPC_SRC_DROPPABLE:
3099 value = tsk_unreliable(tsk);
3101 case TIPC_DEST_DROPPABLE:
3102 value = tsk_unreturnable(tsk);
3104 case TIPC_CONN_TIMEOUT:
3105 value = tsk->conn_timeout;
3106 /* no need to set "res", since already 0 at this point */
3108 case TIPC_NODE_RECVQ_DEPTH:
3109 value = 0; /* was tipc_queue_size, now obsolete */
3111 case TIPC_SOCK_RECVQ_DEPTH:
3112 value = skb_queue_len(&sk->sk_receive_queue);
3114 case TIPC_SOCK_RECVQ_USED:
3115 value = sk_rmem_alloc_get(sk);
3117 case TIPC_GROUP_JOIN:
3120 tipc_group_self(tsk->group, &seq, &scope);
3130 return res; /* "get" failed */
3132 if (len < sizeof(value))
3135 if (copy_to_user(ov, &value, sizeof(value)))
3138 return put_user(sizeof(value), ol);
3141 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3143 struct net *net = sock_net(sock->sk);
3144 struct tipc_sioc_nodeid_req nr = {0};
3145 struct tipc_sioc_ln_req lnr;
3146 void __user *argp = (void __user *)arg;
3149 case SIOCGETLINKNAME:
3150 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3152 if (!tipc_node_get_linkname(net,
3153 lnr.bearer_id & 0xffff, lnr.peer,
3154 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3155 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3159 return -EADDRNOTAVAIL;
3161 if (copy_from_user(&nr, argp, sizeof(nr)))
3163 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3164 return -EADDRNOTAVAIL;
3165 if (copy_to_user(argp, &nr, sizeof(nr)))
3169 return -ENOIOCTLCMD;
3173 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3175 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3176 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3177 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3179 tsk1->peer.family = AF_TIPC;
3180 tsk1->peer.addrtype = TIPC_ADDR_ID;
3181 tsk1->peer.scope = TIPC_NODE_SCOPE;
3182 tsk1->peer.addr.id.ref = tsk2->portid;
3183 tsk1->peer.addr.id.node = onode;
3184 tsk2->peer.family = AF_TIPC;
3185 tsk2->peer.addrtype = TIPC_ADDR_ID;
3186 tsk2->peer.scope = TIPC_NODE_SCOPE;
3187 tsk2->peer.addr.id.ref = tsk1->portid;
3188 tsk2->peer.addr.id.node = onode;
3190 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3191 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3195 /* Protocol switches for the various types of TIPC sockets */
3197 static const struct proto_ops msg_ops = {
3198 .owner = THIS_MODULE,
3200 .release = tipc_release,
3202 .connect = tipc_connect,
3203 .socketpair = tipc_socketpair,
3204 .accept = sock_no_accept,
3205 .getname = tipc_getname,
3207 .ioctl = tipc_ioctl,
3208 .listen = sock_no_listen,
3209 .shutdown = tipc_shutdown,
3210 .setsockopt = tipc_setsockopt,
3211 .getsockopt = tipc_getsockopt,
3212 .sendmsg = tipc_sendmsg,
3213 .recvmsg = tipc_recvmsg,
3214 .mmap = sock_no_mmap,
3215 .sendpage = sock_no_sendpage
3218 static const struct proto_ops packet_ops = {
3219 .owner = THIS_MODULE,
3221 .release = tipc_release,
3223 .connect = tipc_connect,
3224 .socketpair = tipc_socketpair,
3225 .accept = tipc_accept,
3226 .getname = tipc_getname,
3228 .ioctl = tipc_ioctl,
3229 .listen = tipc_listen,
3230 .shutdown = tipc_shutdown,
3231 .setsockopt = tipc_setsockopt,
3232 .getsockopt = tipc_getsockopt,
3233 .sendmsg = tipc_send_packet,
3234 .recvmsg = tipc_recvmsg,
3235 .mmap = sock_no_mmap,
3236 .sendpage = sock_no_sendpage
3239 static const struct proto_ops stream_ops = {
3240 .owner = THIS_MODULE,
3242 .release = tipc_release,
3244 .connect = tipc_connect,
3245 .socketpair = tipc_socketpair,
3246 .accept = tipc_accept,
3247 .getname = tipc_getname,
3249 .ioctl = tipc_ioctl,
3250 .listen = tipc_listen,
3251 .shutdown = tipc_shutdown,
3252 .setsockopt = tipc_setsockopt,
3253 .getsockopt = tipc_getsockopt,
3254 .sendmsg = tipc_sendstream,
3255 .recvmsg = tipc_recvstream,
3256 .mmap = sock_no_mmap,
3257 .sendpage = sock_no_sendpage
3260 static const struct net_proto_family tipc_family_ops = {
3261 .owner = THIS_MODULE,
3263 .create = tipc_sk_create
3266 static struct proto tipc_proto = {
3268 .owner = THIS_MODULE,
3269 .obj_size = sizeof(struct tipc_sock),
3270 .sysctl_rmem = sysctl_tipc_rmem
3274 * tipc_socket_init - initialize TIPC socket interface
3276 * Returns 0 on success, errno otherwise
3278 int tipc_socket_init(void)
3282 res = proto_register(&tipc_proto, 1);
3284 pr_err("Failed to register TIPC protocol type\n");
3288 res = sock_register(&tipc_family_ops);
3290 pr_err("Failed to register TIPC socket type\n");
3291 proto_unregister(&tipc_proto);
3299 * tipc_socket_stop - stop TIPC socket interface
3301 void tipc_socket_stop(void)
3303 sock_unregister(tipc_family_ops.family);
3304 proto_unregister(&tipc_proto);
3307 /* Caller should hold socket lock for the passed tipc socket. */
3308 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3312 struct nlattr *nest;
3314 peer_node = tsk_peer_node(tsk);
3315 peer_port = tsk_peer_port(tsk);
3317 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3321 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3323 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3326 if (tsk->conn_type != 0) {
3327 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3329 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3331 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3334 nla_nest_end(skb, nest);
3339 nla_nest_cancel(skb, nest);
3344 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3347 struct net *net = sock_net(skb->sk);
3348 struct sock *sk = &tsk->sk;
3350 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3351 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3354 if (tipc_sk_connected(sk)) {
3355 if (__tipc_nl_add_sk_con(skb, tsk))
3357 } else if (!list_empty(&tsk->publications)) {
3358 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3364 /* Caller should hold socket lock for the passed tipc socket. */
3365 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3366 struct tipc_sock *tsk)
3368 struct nlattr *attrs;
3371 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3372 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3376 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3378 goto genlmsg_cancel;
3380 if (__tipc_nl_add_sk_info(skb, tsk))
3381 goto attr_msg_cancel;
3383 nla_nest_end(skb, attrs);
3384 genlmsg_end(skb, hdr);
3389 nla_nest_cancel(skb, attrs);
3391 genlmsg_cancel(skb, hdr);
3396 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3397 int (*skb_handler)(struct sk_buff *skb,
3398 struct netlink_callback *cb,
3399 struct tipc_sock *tsk))
3401 struct rhashtable_iter *iter = (void *)cb->args[4];
3402 struct tipc_sock *tsk;
3405 rhashtable_walk_start(iter);
3406 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3409 if (err == -EAGAIN) {
3416 sock_hold(&tsk->sk);
3417 rhashtable_walk_stop(iter);
3418 lock_sock(&tsk->sk);
3419 err = skb_handler(skb, cb, tsk);
3421 release_sock(&tsk->sk);
3425 release_sock(&tsk->sk);
3426 rhashtable_walk_start(iter);
3429 rhashtable_walk_stop(iter);
3433 EXPORT_SYMBOL(tipc_nl_sk_walk);
3435 int tipc_dump_start(struct netlink_callback *cb)
3437 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3439 EXPORT_SYMBOL(tipc_dump_start);
3441 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3443 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3444 struct rhashtable_iter *iter = (void *)cb->args[4];
3445 struct tipc_net *tn = tipc_net(net);
3448 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3452 cb->args[4] = (long)iter;
3455 rhashtable_walk_enter(&tn->sk_rht, iter);
3459 int tipc_dump_done(struct netlink_callback *cb)
3461 struct rhashtable_iter *hti = (void *)cb->args[4];
3463 rhashtable_walk_exit(hti);
3467 EXPORT_SYMBOL(tipc_dump_done);
3469 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3470 struct tipc_sock *tsk, u32 sk_filter_state,
3471 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3473 struct sock *sk = &tsk->sk;
3474 struct nlattr *attrs;
3475 struct nlattr *stat;
3477 /*filter response w.r.t sk_state*/
3478 if (!(sk_filter_state & (1 << sk->sk_state)))
3481 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3485 if (__tipc_nl_add_sk_info(skb, tsk))
3486 goto attr_msg_cancel;
3488 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3489 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3490 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3491 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3492 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3494 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3495 tipc_diag_gen_cookie(sk),
3497 goto attr_msg_cancel;
3499 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3501 goto attr_msg_cancel;
3503 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3504 skb_queue_len(&sk->sk_receive_queue)) ||
3505 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3506 skb_queue_len(&sk->sk_write_queue)) ||
3507 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3508 atomic_read(&sk->sk_drops)))
3509 goto stat_msg_cancel;
3511 if (tsk->cong_link_cnt &&
3512 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3513 goto stat_msg_cancel;
3515 if (tsk_conn_cong(tsk) &&
3516 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3517 goto stat_msg_cancel;
3519 nla_nest_end(skb, stat);
3522 if (tipc_group_fill_sock_diag(tsk->group, skb))
3523 goto stat_msg_cancel;
3525 nla_nest_end(skb, attrs);
3530 nla_nest_cancel(skb, stat);
3532 nla_nest_cancel(skb, attrs);
3536 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3538 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3540 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3543 /* Caller should hold socket lock for the passed tipc socket. */
3544 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3545 struct netlink_callback *cb,
3546 struct publication *publ)
3549 struct nlattr *attrs;
3551 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3552 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3556 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3558 goto genlmsg_cancel;
3560 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3561 goto attr_msg_cancel;
3562 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3563 goto attr_msg_cancel;
3564 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3565 goto attr_msg_cancel;
3566 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3567 goto attr_msg_cancel;
3569 nla_nest_end(skb, attrs);
3570 genlmsg_end(skb, hdr);
3575 nla_nest_cancel(skb, attrs);
3577 genlmsg_cancel(skb, hdr);
3582 /* Caller should hold socket lock for the passed tipc socket. */
3583 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3584 struct netlink_callback *cb,
3585 struct tipc_sock *tsk, u32 *last_publ)
3588 struct publication *p;
3591 list_for_each_entry(p, &tsk->publications, binding_sock) {
3592 if (p->key == *last_publ)
3595 if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
3596 /* We never set seq or call nl_dump_check_consistent()
3597 * this means that setting prev_seq here will cause the
3598 * consistence check to fail in the netlink callback
3599 * handler. Resulting in the last NLMSG_DONE message
3600 * having the NLM_F_DUMP_INTR flag set.
3607 p = list_first_entry(&tsk->publications, struct publication,
3611 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3612 err = __tipc_nl_add_sk_publ(skb, cb, p);
3614 *last_publ = p->key;
3623 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3626 u32 tsk_portid = cb->args[0];
3627 u32 last_publ = cb->args[1];
3628 u32 done = cb->args[2];
3629 struct net *net = sock_net(skb->sk);
3630 struct tipc_sock *tsk;
3633 struct nlattr **attrs;
3634 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3636 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3640 if (!attrs[TIPC_NLA_SOCK])
3643 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3644 attrs[TIPC_NLA_SOCK],
3645 tipc_nl_sock_policy, NULL);
3649 if (!sock[TIPC_NLA_SOCK_REF])
3652 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3658 tsk = tipc_sk_lookup(net, tsk_portid);
3662 lock_sock(&tsk->sk);
3663 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3666 release_sock(&tsk->sk);
3669 cb->args[0] = tsk_portid;
3670 cb->args[1] = last_publ;
3677 * tipc_sk_filtering - check if a socket should be traced
3678 * @sk: the socket to be examined
3679 * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
3680 * (portid, sock type, name type, name lower, name upper)
3682 * Returns true if the socket meets the socket tuple data
3683 * (value 0 = 'any') or when there is no tuple set (all = 0),
3686 bool tipc_sk_filtering(struct sock *sk)
3688 struct tipc_sock *tsk;
3689 struct publication *p;
3690 u32 _port, _sktype, _type, _lower, _upper;
3691 u32 type = 0, lower = 0, upper = 0;
3698 _port = sysctl_tipc_sk_filter[0];
3699 _sktype = sysctl_tipc_sk_filter[1];
3700 _type = sysctl_tipc_sk_filter[2];
3701 _lower = sysctl_tipc_sk_filter[3];
3702 _upper = sysctl_tipc_sk_filter[4];
3704 if (!_port && !_sktype && !_type && !_lower && !_upper)
3708 return (_port == tsk->portid);
3710 if (_sktype && _sktype != sk->sk_type)
3713 if (tsk->published) {
3714 p = list_first_entry_or_null(&tsk->publications,
3715 struct publication, binding_sock);
3723 if (!tipc_sk_type_connectionless(sk)) {
3724 type = tsk->conn_type;
3725 lower = tsk->conn_instance;
3726 upper = tsk->conn_instance;
3729 if ((_type && _type != type) || (_lower && _lower != lower) ||
3730 (_upper && _upper != upper))
3736 u32 tipc_sock_get_portid(struct sock *sk)
3738 return (sk) ? (tipc_sk(sk))->portid : 0;
3742 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded,
3743 * both the rcv and backlog queues are considered
3744 * @sk: tipc sk to be checked
3745 * @skb: tipc msg to be checked
3747 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3750 bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3752 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3753 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3754 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3756 return (qsize > lim * 90 / 100);
3760 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded,
3761 * only the rcv queue is considered
3762 * @sk: tipc sk to be checked
3763 * @skb: tipc msg to be checked
3765 * Returns true if the socket rx queue allocation is > 90%, otherwise false
3768 bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3770 unsigned int lim = rcvbuf_limit(sk, skb);
3771 unsigned int qsize = sk_rmem_alloc_get(sk);
3773 return (qsize > lim * 90 / 100);
3777 * tipc_sk_dump - dump TIPC socket
3778 * @sk: tipc sk to be dumped
3779 * @dqueues: bitmask to decide if any socket queue to be dumped?
3780 * - TIPC_DUMP_NONE: don't dump socket queues
3781 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3782 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3783 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3784 * - TIPC_DUMP_ALL: dump all the socket queues above
3785 * @buf: returned buffer of dump data in format
3787 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3790 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3791 struct tipc_sock *tsk;
3792 struct publication *p;
3796 i += scnprintf(buf, sz, "sk data: (null)\n");
3801 tsk_connected = !tipc_sk_type_connectionless(sk);
3803 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3804 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3805 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3806 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3807 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3808 if (tsk_connected) {
3809 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3810 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3811 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3812 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3814 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3815 if (tsk->published) {
3816 p = list_first_entry_or_null(&tsk->publications,
3817 struct publication, binding_sock);
3818 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
3819 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
3820 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
3822 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3823 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3824 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3825 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3826 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3827 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3828 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3829 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3830 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3831 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3832 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3833 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3834 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3835 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3837 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3838 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3839 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3842 if (dqueues & TIPC_DUMP_SK_RCVQ) {
3843 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3844 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3847 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3848 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
3849 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3850 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
3851 i += scnprintf(buf + i, sz - i, " tail ");
3852 i += tipc_skb_dump(sk->sk_backlog.tail, false,