/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/inet6_hashtables.h>
#include <net/addrconf.h>

#include "rds.h"
#include "loop.h"
#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)
/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;
static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
					  const struct in6_addr *faddr)
{
	static u32 rds6_hash_secret __read_mostly;
	static u32 rds_hash_secret __read_mostly;

	u32 lhash, fhash, hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
	net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
#if IS_ENABLED(CONFIG_IPV6)
	fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret);
#else
	fhash = (__force u32)faddr->s6_addr32[3];
#endif
	hash = __inet_ehashfn(lhash, 0, fhash, 0, rds_hash_secret);

	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}
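
/* Illustrative sketch (not part of the original file): for IPv4 peers the
 * addresses are v4-mapped, so only s6_addr32[3] feeds lhash.  A hypothetical
 * caller hashing a loopback pair might look like:
 *
 *	struct in6_addr laddr, faddr;
 *	struct hlist_head *head;
 *
 *	ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), &laddr);
 *	ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), &faddr);
 *	head = rds_conn_bucket(&laddr, &faddr);
 *
 * Creation and lookup hash with the same per-boot secrets, so a given
 * (laddr, faddr) pair always maps to the same bucket.
 */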
#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)
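
/* For illustration: rds_conn_info_set(cinfo->flags, cond, SENDING) expands
 * to roughly
 *
 *	if (cond)
 *		cinfo->flags |= RDS_INFO_CONNECTION_FLAG_SENDING;
 *
 * i.e. the suffix selects one of the RDS_INFO_CONNECTION_FLAG_* bits.
 */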
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      const struct in6_addr *laddr,
					      const struct in6_addr *faddr,
					      struct rds_transport *trans,
					      int dev_if)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
		    ipv6_addr_equal(&conn->c_laddr, laddr) &&
		    conn->c_trans == trans &&
		    net == rds_conn_net(conn) &&
		    conn->c_dev_if == dev_if) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
		 laddr, faddr);
	return ret;
}
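
/* Callers must hold rcu_read_lock() or rds_conn_lock across the walk; a
 * minimal usage sketch mirroring __rds_conn_create() below:
 *
 *	rcu_read_lock();
 *	conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if);
 *	rcu_read_unlock();
 */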
/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("connection %pI6c to %pI6c reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_path_reset(cp);
	cp->cp_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}
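
/* Worked example of the note above (illustrative sequence numbers): if the
 * path resets after delivering seqs 1..4, cp_next_rx_seq stays at 5, so a
 * retransmit of seq 4 on the new incarnation is recognized as a duplicate
 * and dropped rather than handed to the application twice.
 */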
static void __rds_conn_path_init(struct rds_connection *conn,
				 struct rds_conn_path *cp, bool is_outgoing)
{
	spin_lock_init(&cp->cp_lock);
	cp->cp_next_tx_seq = 1;
	init_waitqueue_head(&cp->cp_waitq);
	INIT_LIST_HEAD(&cp->cp_send_queue);
	INIT_LIST_HEAD(&cp->cp_retrans);

	cp->cp_conn = conn;
	atomic_set(&cp->cp_state, RDS_CONN_DOWN);
	cp->cp_send_gen = 0;
	cp->cp_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
	mutex_init(&cp->cp_cm_lock);
	cp->cp_flags = 0;
}
/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp,
						int is_outgoing,
						int dev_if)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret, i;
	int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if);
	if (conn &&
	    conn->c_loopback &&
	    conn->c_trans != &rds_loop_transport &&
	    ipv6_addr_equal(laddr, faddr) &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}
	conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
	if (!conn->c_path) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}
	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = *laddr;
	conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
	conn->c_faddr = *faddr;
	conn->c_dev_if = dev_if;

#if IS_ENABLED(CONFIG_IPV6)
	/* If the local address is link local, set c_bound_if to be the
	 * index used for this connection.  Otherwise, set it to 0 as
	 * the socket is not bound to an interface.  c_bound_if is used
	 * to look up a socket when a packet is received
	 */
	if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL)
		conn->c_bound_if = dev_if;
	else
#endif
		conn->c_bound_if = 0;

	rds_conn_net_set(conn, net);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}
	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (trans->t_prefer_loopback) {
			if (likely(is_outgoing)) {
				/* "outgoing" connection to local address.
				 * Protocol says it wants the connection
				 * handled by the loopback transport.
				 * This is what TCP does.
				 */
				trans = &rds_loop_transport;
			} else {
				/* No transport currently in use
				 * should end up here, but if it
				 * does, reset/destroy the connection.
				 */
				kfree(conn->c_path);
				kmem_cache_free(rds_conn_slab, conn);
				conn = ERR_PTR(-EOPNOTSUPP);
				goto out;
			}
		}
	}

	conn->c_trans = trans;
	init_waitqueue_head(&conn->c_hs_waitq);
	for (i = 0; i < npaths; i++) {
		__rds_conn_path_init(conn, &conn->c_path[i],
				     is_outgoing);
		conn->c_path[i].cp_index = i;
	}
	rcu_read_lock();
	if (rds_destroy_pending(conn))
		ret = -ENETDOWN;
	else
		ret = trans->conn_alloc(conn, GFP_ATOMIC);
	if (ret) {
		rcu_read_unlock();
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}
	rcu_read_unlock();

	rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
		 conn, laddr, faddr,
		 strnlen(trans->t_name, sizeof(trans->t_name)) ?
		 trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : "");
	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim.  We check while holding the lock.  If we won, we complete
	 * init and return our conn.  If we lost, we rollback and return the
	 * lost conn.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_path[0].cp_transport_data);
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans,
					dev_if);
		if (found) {
			struct rds_conn_path *cp;
			int i;

			for (i = 0; i < npaths; i++) {
				cp = &conn->c_path[i];
				/* The ->conn_alloc invocation may have
				 * allocated resource for all paths, so all
				 * of them may have to be freed here.
				 */
				if (cp->cp_transport_data)
					trans->conn_free(cp->cp_transport_data);
			}
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			conn->c_my_gen_num = rds_gen_num;
			conn->c_peer_gen_num = 0;
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp, int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
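
/* A hedged sketch of an active-side caller (the variable names here are
 * illustrative, not from this file):
 *
 *	conn = rds_conn_create_outgoing(sock_net(sk), &local, &peer,
 *					&rds_tcp_transport, GFP_KERNEL,
 *					dev_if);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *
 * Because __rds_conn_create() caches conns by (laddr, faddr, transport),
 * repeated calls for the same tuple return the same rds_connection.
 */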

void rds_conn_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	/* shut it down unless it's down already */
	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&cp->cp_cm_lock);
		if (!rds_conn_path_transition(cp, RDS_CONN_UP,
					      RDS_CONN_DISCONNECTING) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DISCONNECTING)) {
			rds_conn_path_error(cp,
					    "shutdown called in state %d\n",
					    atomic_read(&cp->cp_state));
			mutex_unlock(&cp->cp_cm_lock);
			return;
		}
		mutex_unlock(&cp->cp_cm_lock);

		wait_event(cp->cp_waitq,
			   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

		conn->c_trans->conn_path_shutdown(cp);
		rds_conn_path_reset(cp);

		if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
					      RDS_CONN_DOWN) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 *
			 * Note that this also happens with rds-tcp because
			 * we could have triggered rds_conn_path_drop in irq
			 * mode from rds_tcp_state change on the receipt of
			 * a FIN, thus we need to recheck for RDS_CONN_ERROR
			 * state.
			 */
			rds_conn_path_error(cp, "%s: failed to transition "
					    "to state DOWN, current state "
					    "is %d\n", __func__,
					    atomic_read(&cp->cp_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&cp->cp_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(cp);
	} else {
		rcu_read_unlock();
	}
}

/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy()
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
	struct rds_message *rm, *rtmp;

	if (!cp->cp_transport_data)
		return;

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);

	rds_conn_path_drop(cp, true);
	flush_work(&cp->cp_down_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &cp->cp_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (cp->cp_xmit_rm)
		rds_message_put(cp->cp_xmit_rm);

	WARN_ON(delayed_work_pending(&cp->cp_send_w));
	WARN_ON(delayed_work_pending(&cp->cp_recv_w));
	WARN_ON(delayed_work_pending(&cp->cp_conn_w));
	WARN_ON(work_pending(&cp->cp_down_w));

	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}
/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	unsigned long flags;
	int i;
	struct rds_conn_path *cp;
	int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	rdsdebug("freeing conn %p for %pI6c -> %pI6c\n",
		 conn, &conn->c_laddr, &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	for (i = 0; i < npaths; i++) {
		cp = &conn->c_path[i];
		rds_conn_path_destroy(cp);
		BUG_ON(!list_empty(&cp->cp_retrans));
	}

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	kfree(conn->c_path);
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

static void __rds_inc_msg_cp(struct rds_incoming *inc,
			     struct rds_info_iterator *iter,
			     void *saddr, void *daddr, int flip, bool isv6)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (isv6)
		rds6_inc_info_copy(inc, iter, saddr, daddr, flip);
	else
#endif
		rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
				  *(__be32 *)daddr, flip);
}

static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len,
				      struct rds_info_iterator *iter,
				      struct rds_info_lengths *lens,
				      int want_send, bool isv6)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	if (isv6)
		len /= sizeof(struct rds6_info_message);
	else
		len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;
			int npaths;

			if (!isv6 && conn->c_isv6)
				continue;

			npaths = (conn->c_trans->t_mp_capable ?
				 RDS_MPATH_WORKERS : 1);

			for (j = 0; j < npaths; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts.. */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						__rds_inc_msg_cp(&rm->m_inc,
								 iter,
								 &conn->c_laddr,
								 &conn->c_faddr,
								 0, isv6);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	if (isv6)
		lens->each = sizeof(struct rds6_info_message);
	else
		lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false);
}

#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info(struct socket *sock, unsigned int len,
				   struct rds_info_iterator *iter,
				   struct rds_info_lengths *lens,
				   int want_send)
{
	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true);
}
#endif

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info_send(struct socket *sock, unsigned int len,
					struct rds_info_iterator *iter,
					struct rds_info_lengths *lens)
{
	rds6_conn_message_info(sock, len, iter, lens, 1);
}
#endif

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_message_info_retrans(struct socket *sock,
					   unsigned int len,
					   struct rds_info_iterator *iter,
					   struct rds_info_lengths *lens)
{
	rds6_conn_message_info(sock, len, iter, lens, 0);
}
#endif

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len)
{
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
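
/* Illustrative note: the info framework sizes userspace buffers from
 * lens->nr * lens->each.  If the buffer supplied here is too small, only
 * the first len / item_len entries are copied, but lens->nr still counts
 * every connection so the caller can retry with a larger buffer.
 */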

static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
				    struct rds_info_iterator *iter,
				    struct rds_info_lengths *lens,
				    int (*visitor)(struct rds_conn_path *, void *),
				    u64 *buffer,
				    size_t item_len)
{
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			/* XXX We only copy the information from the first
			 * path for now.  The problem is that if there are
			 * more than one underlying paths, we cannot report
			 * information of all of them using the existing
			 * API.  For example, there is only one next_tx_seq,
			 * which path's next_tx_seq should we report?  It is
			 * a bug in the design of MPRDS.
			 */
			cp = conn->c_path;

			/* XXX no cp_lock usage.. */
			if (!visitor(cp, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}

static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;
	struct rds_connection *conn = cp->cp_conn;

	if (conn->c_isv6)
		return 0;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	cinfo->laddr = conn->c_laddr.s6_addr32[3];
	cinfo->faddr = conn->c_faddr.s6_addr32[3];
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

#if IS_ENABLED(CONFIG_IPV6)
static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds6_info_connection *cinfo6 = buffer;
	struct rds_connection *conn = cp->cp_conn;

	cinfo6->next_tx_seq = cp->cp_next_tx_seq;
	cinfo6->next_rx_seq = cp->cp_next_rx_seq;
	cinfo6->laddr = conn->c_laddr;
	cinfo6->faddr = conn->c_faddr;
	strncpy(cinfo6->transport, conn->c_trans->t_name,
		sizeof(cinfo6->transport));
	cinfo6->flags = 0;

	rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo6->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo6->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	/* Just return 1 as there is no error case. This is a helper function
	 * for rds_walk_conn_path_info() and it wants a return value.
	 */
	return 1;
}
#endif

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	u64 buffer[(sizeof(struct rds_info_connection) + 7) / 8];

	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				buffer,
				sizeof(struct rds_info_connection));
}

#if IS_ENABLED(CONFIG_IPV6)
static void rds6_conn_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	u64 buffer[(sizeof(struct rds6_info_connection) + 7) / 8];

	rds_walk_conn_path_info(sock, len, iter, lens,
				rds6_conn_info_visitor,
				buffer,
				sizeof(struct rds6_info_connection));
}
#endif

int rds_conn_init(void)
{
	int ret;

	ret = rds_loop_net_init(); /* register pernet callback */
	if (ret)
		return ret;

	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab) {
		rds_loop_net_exit();
		return -ENOMEM;
	}

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);
#if IS_ENABLED(CONFIG_IPV6)
	rds_info_register_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
	rds_info_register_func(RDS6_INFO_SEND_MESSAGES,
			       rds6_conn_message_info_send);
	rds_info_register_func(RDS6_INFO_RETRANS_MESSAGES,
			       rds6_conn_message_info_retrans);
#endif
	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_net_exit(); /* unregister pernet callback */
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
#if IS_ENABLED(CONFIG_IPV6)
	rds_info_deregister_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
	rds_info_deregister_func(RDS6_INFO_SEND_MESSAGES,
				 rds6_conn_message_info_send);
	rds_info_deregister_func(RDS6_INFO_RETRANS_MESSAGES,
				 rds6_conn_message_info_retrans);
#endif
}

/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);

	rcu_read_lock();
	if (!destroy && rds_destroy_pending(cp->cp_conn)) {
		rcu_read_unlock();
		return;
	}
	queue_work(rds_wq, &cp->cp_down_w);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

void rds_conn_drop(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_drop(&conn->c_path[0], false);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	rcu_read_lock();
	if (rds_destroy_pending(cp->cp_conn)) {
		rcu_read_unlock();
		return;
	}
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);

void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
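
/* Illustrative pairing (not from this file): a transport that detects a
 * dead path typically drops it and later kicks a reconnect:
 *
 *	rds_conn_path_drop(cp, false);
 *	...
 *	rds_conn_path_connect_if_down(cp);
 *
 * The RDS_RECONNECT_PENDING bit keeps a queued delayed reconnect from
 * being scheduled twice.
 */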

void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_path_drop(cp, false);
}