/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/rds.h>

#include "rds.h"
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr)
{
	atomic_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = conn;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_init);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
		       __be32 saddr)
{
	atomic_set(&inc->i_refcount, 1);
	INIT_LIST_HEAD(&inc->i_item);
	inc->i_conn = cp->cp_conn;
	inc->i_conn_path = cp;
	inc->i_saddr = saddr;
	inc->i_rdma_cookie = 0;
	inc->i_rx_tstamp.tv_sec = 0;
	inc->i_rx_tstamp.tv_usec = 0;
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
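/*
 * Reference counting of struct rds_incoming, as implemented below: an inc
 * starts life with i_refcount == 1 (set in the init helpers above),
 * rds_inc_addref() takes an extra reference when the inc is queued on a
 * socket's receive queue, and rds_inc_put() drops a reference, handing the
 * inc back to the transport's inc_free() once the count reaches zero.
 */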
static void rds_inc_addref(struct rds_incoming *inc)
{
	rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	atomic_inc(&inc->i_refcount);
}
void rds_inc_put(struct rds_incoming *inc)
{
	rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
	if (atomic_dec_and_test(&inc->i_refcount)) {
		BUG_ON(!list_empty(&inc->i_item));

		inc->i_conn->c_trans->inc_free(inc);
	}
}
EXPORT_SYMBOL_GPL(rds_inc_put);
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
				  struct rds_cong_map *map,
				  int delta, __be16 port)
{
	int now_congested;

	if (delta == 0)
		return;

	rs->rs_rcv_bytes += delta;

	/* loop transport doesn't send/recv congestion updates */
	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
		return;

	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
	  "now_cong %d delta %d\n",
	  rs, &rs->rs_bound_addr,
	  ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
	  rds_sk_rcvbuf(rs), now_congested, delta);
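	/* The hysteresis between the two thresholds below keeps the
	 * congestion bit from flapping.  For example, with
	 * rds_sk_rcvbuf(rs) == 64KB the port is marked congested once
	 * rs_rcv_bytes exceeds 64KB, but is only marked uncongested again
	 * once rs_rcv_bytes drops below 32KB (half the receive buffer).
	 */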
	/* wasn't -> am congested */
	if (!rs->rs_congested && now_congested) {
		rs->rs_congested = 1;
		rds_cong_set_bit(map, port);
		rds_cong_queue_updates(map);
	}
	/* was -> aren't congested */
	/* Require more free space before reporting uncongested to prevent
	   bouncing cong/uncong state too often */
	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
		rs->rs_congested = 0;
		rds_cong_clear_bit(map, port);
		rds_cong_queue_updates(map);
	}

	/* do nothing if no change in cong state */
}
/*
 * Process all extension headers that come with this message.
 */
static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
{
	struct rds_header *hdr = &inc->i_hdr;
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		struct rds_ext_header_rdma rdma;
		struct rds_ext_header_rdma_dest rdma_dest;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_RDMA:
			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
			break;

		case RDS_EXTHDR_RDMA_DEST:
			/* We ignore the size for now. We could stash it
			 * somewhere and use it for error checking. */
			inc->i_rdma_cookie = rds_rdma_make_cookie(
					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));

			break;
		}
	}
}
static void rds_recv_hs_exthdrs(struct rds_header *hdr,
				struct rds_connection *conn)
{
	unsigned int pos = 0, type, len;
	union {
		struct rds_ext_header_version version;
		u16 rds_npaths;
	} buffer;

	while (1) {
		len = sizeof(buffer);
		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		/* Process extension header here */
		switch (type) {
		case RDS_EXTHDR_NPATHS:
			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
					       buffer.rds_npaths);
			break;
		default:
			pr_warn_ratelimited("ignoring unknown exthdr type "
					    "0x%x\n", type);
		}
	}
	/* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
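	/* Between the min_t() above and the max_t() below, c_npaths always
	 * ends up in the range [1, RDS_MPATH_WORKERS]. */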
	conn->c_npaths = max_t(int, conn->c_npaths, 1);
}
/* rds_start_mprds() will synchronously start multiple paths when appropriate.
 * The scheme is based on the following rules:
 *
 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
 *    sender's npaths (s_npaths)
 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
 *    mprds_paths
 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
 *    called after reception of the probe-pong on all mprds_paths.
 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
 * 4. when cp_index > 0, rds_connect_worker must only trigger
 *    a connection if laddr < faddr.
 * 5. sender may end up queuing the packet on the cp. will get sent out later
 *    when connection is completed.
 */
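/* Illustration (hypothetical addresses): if 192.168.10.1 and 192.168.10.2
 * both advertise RDS_MPATH_WORKERS paths in the probe exchange, only the
 * side bound to the numerically smaller address calls
 * rds_conn_path_connect_if_down() on paths 1..c_npaths-1 below; path 0 is
 * the original connection, and per rule 4 the other side never initiates
 * the extra paths itself.
 */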
static void rds_start_mprds(struct rds_connection *conn)
{
	int i;
	struct rds_conn_path *cp;

	if (conn->c_npaths > 1 && conn->c_laddr < conn->c_faddr) {
		for (i = 1; i < conn->c_npaths; i++) {
			cp = &conn->c_path[i];
			rds_conn_path_connect_if_down(cp);
		}
	}
}
/*
 * The transport must make sure that this is serialized against other
 * rx and conn reset on this specific conn.
 *
 * We currently assert that only one fragmented message will be sent
 * down a connection at a time.  This lets us reassemble in the conn
 * instead of per-flow which means that we don't have to go digging through
 * flows to tear down partial reassembly progress on conn failure and
 * we save flow lookup and locking for each frag arrival.  It does mean
 * that small messages will wait behind large ones.  Fragmenting at all
 * is only to reduce the memory consumption of pre-posted buffers.
 *
 * The caller passes in saddr and daddr instead of us getting it from the
 * conn.  This lets loopback, who only has one conn for both directions,
 * tell us which roles the addrs in the conn are playing for this message.
 */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;
	struct rds_conn_path *cp;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;
	if (conn->c_trans->t_mp_capable)
		cp = inc->i_conn_path;
	else
		cp = &conn->c_path[0];

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)cp->cp_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);
	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
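	/* For example, if cp_next_rx_seq is 42 and a message arrives with
	 * h_sequence 40 and RDS_FLAG_RETRANSMITTED set (a resend after
	 * failover), it is dropped below; a message with h_sequence 45 is
	 * accepted and advances cp_next_rx_seq to 46.
	 */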
	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		if (inc->i_hdr.h_sport == 0) {
			rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
			goto out;
		}
		rds_stats_inc(s_recv_ping);
		rds_send_pong(cp, inc->i_hdr.h_sport);
		/* if this is a handshake ping, start multipath if necessary */
		if (RDS_HS_PROBE(inc->i_hdr.h_sport, inc->i_hdr.h_dport)) {
			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
			rds_start_mprds(cp->cp_conn);
		}
		goto out;
	}
	if (inc->i_hdr.h_dport == RDS_FLAG_PROBE_PORT &&
	    inc->i_hdr.h_sport == 0) {
		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
		/* if this is a handshake pong, start multipath if necessary */
		rds_start_mprds(cp->cp_conn);
		wake_up(&cp->cp_conn->c_hs_waitq);
		goto out;
	}
	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);
	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			do_gettimeofday(&inc->i_rx_tstamp);
		rds_inc_addref(inc);
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
EXPORT_SYMBOL_GPL(rds_recv_incoming);
/*
 * be very careful here.  This is being called as the condition in
 * wait_event_*() needs to cope with being called many times.
 */
static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
	unsigned long flags;

	if (!*inc) {
		read_lock_irqsave(&rs->rs_recv_lock, flags);
		if (!list_empty(&rs->rs_recv_queue)) {
			*inc = list_entry(rs->rs_recv_queue.next,
					  struct rds_incoming,
					  i_item);
			rds_inc_addref(*inc);
		}
		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
	}

	return *inc != NULL;
}
static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
			    int drop)
{
	struct sock *sk = rds_rs_to_sk(rs);
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!list_empty(&inc->i_item)) {
		ret = 1;
		if (drop) {
			/* XXX make sure this i_conn is reliable */
			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
					      -be32_to_cpu(inc->i_hdr.h_len),
					      inc->i_hdr.h_dport);
			list_del_init(&inc->i_item);
			rds_inc_put(inc);
		}
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
	return ret;
}
/*
 * Pull errors off the error queue.
 * If msghdr is NULL, we will just purge the error queue.
 */
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
	struct rds_notifier *notifier;
	struct rds_rdma_notify cmsg;
	unsigned int count = 0, max_messages = ~0U;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	memset(&cmsg, 0, sizeof(cmsg));	/* fill holes with zero */
	/* put_cmsg copies to user space and thus may sleep. We can't do this
	 * with rs_lock held, so first grab as many notifications as we can stuff
	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
	 * losing notifications - except when the buffer is so small that it
	 * wouldn't even hold a single notification. Then we give the caller as
	 * much of this single msg as we can squeeze in, and set MSG_CTRUNC.
	 */
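	/* Example of the sizing: if the caller supplies msg_controllen of
	 * 256 bytes and CMSG_SPACE(sizeof(cmsg)) evaluates to 32 bytes
	 * (an illustrative value, it depends on the architecture's cmsg
	 * alignment), at most 8 notifications are dequeued per call. */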
	if (msghdr) {
		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
		if (!max_messages)
			max_messages = 1;
	}
	spin_lock_irqsave(&rs->rs_lock, flags);
	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
		notifier = list_entry(rs->rs_notify_queue.next,
				struct rds_notifier, n_list);
		list_move(&notifier->n_list, &copy);
		count++;
	}
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (!count)
		return 0;
	while (!list_empty(&copy)) {
		notifier = list_entry(copy.next, struct rds_notifier, n_list);

		if (msghdr) {
			cmsg.user_token = notifier->n_user_token;
			cmsg.status = notifier->n_status;

			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
				       sizeof(cmsg), &cmsg);
			if (err)
				break;
		}

		list_del_init(&notifier->n_list);
		kfree(notifier);
	}
	/* If we bailed out because of an error in put_cmsg,
	 * we may be left with one or more notifications that we
	 * didn't process. Return them to the head of the list. */
	if (!list_empty(&copy)) {
		spin_lock_irqsave(&rs->rs_lock, flags);
		list_splice(&copy, &rs->rs_notify_queue);
		spin_unlock_irqrestore(&rs->rs_lock, flags);
	}

	return err;
}
/*
 * Queue a congestion notification
 */
static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
{
	uint64_t notify = rs->rs_cong_notify;
	unsigned long flags;
	int err;

	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
		       sizeof(notify), &notify);
	if (err)
		return err;

	spin_lock_irqsave(&rs->rs_lock, flags);
	rs->rs_cong_notify &= ~notify;
	spin_unlock_irqrestore(&rs->rs_lock, flags);

	return 0;
}
/*
 * Receive any control messages.
 */
static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
			 struct rds_sock *rs)
{
	int ret = 0;

	if (inc->i_rdma_cookie) {
		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
			       sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
		if (ret)
			return ret;
	}

	if ((inc->i_rx_tstamp.tv_sec != 0) &&
	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
		ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
			       sizeof(struct timeval),
			       &inc->i_rx_tstamp);
		if (ret)
			return ret;
	}

	return 0;
}
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	long timeo;
	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct rds_incoming *inc = NULL;

	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
	timeo = sock_rcvtimeo(sk, nonblock);

	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);

	if (msg_flags & MSG_OOB)
		goto out;

	while (1) {
		struct iov_iter save;
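		/* Each pass through this loop handles exactly one of, in
		 * priority order: pending RDMA notifications, pending
		 * congestion notifications, or a queued incoming message;
		 * only the message case waits below when nothing is queued. */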
		/* If there are pending notifications, do those - and nothing else */
		if (!list_empty(&rs->rs_notify_queue)) {
			ret = rds_notify_queue_get(rs, msg);
			break;
		}

		if (rs->rs_cong_notify) {
			ret = rds_notify_cong(rs, msg);
			break;
		}

		if (!rds_next_incoming(rs, &inc)) {
			if (nonblock) {
				ret = -EAGAIN;
				break;
			}

			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					(!list_empty(&rs->rs_notify_queue) ||
					 rs->rs_cong_notify ||
					 rds_next_incoming(rs, &inc)), timeo);
			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
				 timeo);
			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
				continue;

			ret = timeo;
			if (ret == 0)
				ret = -ETIMEDOUT;
			break;
		}
		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
			 &inc->i_conn->c_faddr,
			 ntohs(inc->i_hdr.h_sport));
		save = msg->msg_iter;
		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
		if (ret < 0)
			break;
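		/* msg_iter was saved above so that, if the raced-delivery
		 * check below fails, the partially advanced iterator can be
		 * rewound and the copy retried with the next message. */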
		/*
		 * if the message we just copied isn't at the head of the
		 * recv queue then someone else raced us to return it, try
		 * to get the next message.
		 */
		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
			rds_inc_put(inc);
			inc = NULL;
			rds_stats_inc(s_recv_deliver_raced);
			msg->msg_iter = save;
			continue;
		}
		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
			if (msg_flags & MSG_TRUNC)
				ret = be32_to_cpu(inc->i_hdr.h_len);
			msg->msg_flags |= MSG_TRUNC;
		}

		if (rds_cmsg_recv(inc, msg, rs)) {
			ret = -EFAULT;
			goto out;
		}

		rds_stats_inc(s_recv_delivered);

		if (sin) {
			sin->sin_family = AF_INET;
			sin->sin_port = inc->i_hdr.h_sport;
			sin->sin_addr.s_addr = inc->i_saddr;
			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
			msg->msg_namelen = sizeof(*sin);
		}
		break;
	}

	if (inc)
		rds_inc_put(inc);
out:
	return ret;
}
/*
 * The socket is being shut down and we're asked to drop messages that were
 * queued for recvmsg.  The caller has unbound the socket so the receive path
 * won't queue any more incoming fragments or messages on the socket.
 */
void rds_clear_recv_queue(struct rds_sock *rs)
{
	struct sock *sk = rds_rs_to_sk(rs);
	struct rds_incoming *inc, *tmp;
	unsigned long flags;

	write_lock_irqsave(&rs->rs_recv_lock, flags);
	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      -be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		list_del_init(&inc->i_item);
		rds_inc_put(inc);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
/*
 * inc->i_saddr isn't used here because it is only set in the receive
 * path.
 */
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip)
{
	struct rds_info_message minfo;

	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
	minfo.len = be32_to_cpu(inc->i_hdr.h_len);

	/* flip reports the addresses and ports from the remote end's
	 * point of view */
	if (flip) {
		minfo.laddr = daddr;
		minfo.faddr = saddr;
		minfo.lport = inc->i_hdr.h_dport;
		minfo.fport = inc->i_hdr.h_sport;
	} else {
		minfo.laddr = saddr;
		minfo.faddr = daddr;
		minfo.lport = inc->i_hdr.h_sport;
		minfo.fport = inc->i_hdr.h_dport;
	}

	minfo.flags = 0;

	rds_info_copy(iter, &minfo, sizeof(minfo));
}