1 /*
2  * Copyright (c) 2006 Oracle.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 #include <linux/kernel.h>
34 #include <linux/in.h>
35 #include <linux/device.h>
36 #include <linux/dmapool.h>
37 #include <linux/ratelimit.h>
38
39 #include "rds.h"
40 #include "ib.h"
41
42 /*
43  * Convert an IB-specific completion status to an RDS status and call the
44  * core completion handler.
45  */
46 static void rds_ib_send_complete(struct rds_message *rm,
47                                  int wc_status,
48                                  void (*complete)(struct rds_message *rm, int status))
49 {
50         int notify_status;
51
52         switch (wc_status) {
53         case IB_WC_WR_FLUSH_ERR:
54                 return;
55
56         case IB_WC_SUCCESS:
57                 notify_status = RDS_RDMA_SUCCESS;
58                 break;
59
60         case IB_WC_REM_ACCESS_ERR:
61                 notify_status = RDS_RDMA_REMOTE_ERROR;
62                 break;
63
64         default:
65                 notify_status = RDS_RDMA_OTHER_ERROR;
66                 break;
67         }
68         complete(rm, notify_status);
69 }
70
71 static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
72                                    struct rm_rdma_op *op,
73                                    int wc_status)
74 {
75         if (op->op_mapped) {
76                 ib_dma_unmap_sg(ic->i_cm_id->device,
77                                 op->op_sg, op->op_nents,
78                                 op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
79                 op->op_mapped = 0;
80         }
81
82         /* If the user asked for a completion notification on this
83          * message, we can implement three different semantics:
84          *  1.  Notify when we received the ACK on the RDS message
85          *      that was queued with the RDMA. This provides reliable
86          *      notification of RDMA status at the expense of a one-way
87          *      packet delay.
88          *  2.  Notify when the IB stack gives us the completion event for
89          *      the RDMA operation.
90          *  3.  Notify when the IB stack gives us the completion event for
91          *      the accompanying RDS messages.
92          * Here, we implement approach #3. To implement approach #2,
93          * we would need to take an event for the rdma WR. To implement #1,
94          * don't call rds_rdma_send_complete at all, and fall back to the notify
95          * handling in the ACK processing code.
96          *
97          * Note: There's no need to explicitly sync any RDMA buffers using
98          * ib_dma_sync_sg_for_cpu - the completion for the RDMA
99          * operation itself unmapped the RDMA buffers, which takes care
100          * of syncing.
101          */
102         rds_ib_send_complete(container_of(op, struct rds_message, rdma),
103                              wc_status, rds_rdma_send_complete);
104
105         if (op->op_write)
106                 rds_stats_add(s_send_rdma_bytes, op->op_bytes);
107         else
108                 rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
109 }
110
111 static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
112                                      struct rm_atomic_op *op,
113                                      int wc_status)
114 {
115         /* unmap atomic recvbuf */
116         if (op->op_mapped) {
117                 ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
118                                 DMA_FROM_DEVICE);
119                 op->op_mapped = 0;
120         }
121
122         rds_ib_send_complete(container_of(op, struct rds_message, atomic),
123                              wc_status, rds_atomic_send_complete);
124
125         if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
126                 rds_ib_stats_inc(s_ib_atomic_cswp);
127         else
128                 rds_ib_stats_inc(s_ib_atomic_fadd);
129 }
130
131 static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
132                                    struct rm_data_op *op,
133                                    int wc_status)
134 {
135         struct rds_message *rm = container_of(op, struct rds_message, data);
136
137         if (op->op_nents)
138                 ib_dma_unmap_sg(ic->i_cm_id->device,
139                                 op->op_sg, op->op_nents,
140                                 DMA_TO_DEVICE);
141
142         if (rm->rdma.op_active && rm->data.op_notify)
143                 rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
144 }
145
146 /*
147  * Unmap the resources associated with a struct send_work.
148  *
149  * Returns the rm for no better reason than that the caller, the event
150  * handler, needs it, and currently the only way to obtain it is by
151  * switching on wr.opcode.
152  */
153 static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
154                                                 struct rds_ib_send_work *send,
155                                                 int wc_status)
156 {
157         struct rds_message *rm = NULL;
158
159         /* In the error case, wc.opcode sometimes contains garbage */
160         switch (send->s_wr.opcode) {
161         case IB_WR_SEND:
162                 if (send->s_op) {
163                         rm = container_of(send->s_op, struct rds_message, data);
164                         rds_ib_send_unmap_data(ic, send->s_op, wc_status);
165                 }
166                 break;
167         case IB_WR_RDMA_WRITE:
168         case IB_WR_RDMA_READ:
169                 if (send->s_op) {
170                         rm = container_of(send->s_op, struct rds_message, rdma);
171                         rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
172                 }
173                 break;
174         case IB_WR_ATOMIC_FETCH_AND_ADD:
175         case IB_WR_ATOMIC_CMP_AND_SWP:
176                 if (send->s_op) {
177                         rm = container_of(send->s_op, struct rds_message, atomic);
178                         rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
179                 }
180                 break;
181         default:
182                 printk_ratelimited(KERN_NOTICE
183                                "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
184                                __func__, send->s_wr.opcode);
185                 break;
186         }
187
188         send->s_wr.opcode = 0xdead;
189
190         return rm;
191 }
192
193 void rds_ib_send_init_ring(struct rds_ib_connection *ic)
194 {
195         struct rds_ib_send_work *send;
196         u32 i;
197
198         for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
199                 struct ib_sge *sge;
200
201                 send->s_op = NULL;
202
203                 send->s_wr.wr_id = i | RDS_IB_SEND_OP;
204                 send->s_wr.sg_list = send->s_sge;
205                 send->s_wr.ex.imm_data = 0;
206
207                 sge = &send->s_sge[0];
208                 sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
209                 sge->length = sizeof(struct rds_header);
210                 sge->lkey = ic->i_pd->local_dma_lkey;
211
212                 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
213         }
214 }
215
216 void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
217 {
218         struct rds_ib_send_work *send;
219         u32 i;
220
221         for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
222                 if (send->s_op && send->s_wr.opcode != 0xdead)
223                         rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
224         }
225 }
226
227 /*
228  * The only fast path caller always has a non-zero nr, so we don't
229  * bother testing nr before performing the atomic sub.
230  */
231 static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
232 {
233         if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
234             waitqueue_active(&rds_ib_ring_empty_wait))
235                 wake_up(&rds_ib_ring_empty_wait);
236         BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
237 }
238
239 /*
240  * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
241  * operations performed in the send path.  As the sender allocs and potentially
242  * unallocs the next free entry in the ring, it doesn't alter which entry
243  * is next to be freed, which is all this code is concerned with.
244  */
245 void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
246 {
247         struct rds_message *rm = NULL;
248         struct rds_connection *conn = ic->conn;
249         struct rds_ib_send_work *send;
250         u32 completed;
251         u32 oldest;
252         u32 i = 0;
253         int nr_sig = 0;
254
255
256         rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
257                  (unsigned long long)wc->wr_id, wc->status,
258                  ib_wc_status_msg(wc->status), wc->byte_len,
259                  be32_to_cpu(wc->ex.imm_data));
260         rds_ib_stats_inc(s_ib_tx_cq_event);
261
262         if (wc->wr_id == RDS_IB_ACK_WR_ID) {
263                 if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
264                         rds_ib_stats_inc(s_ib_tx_stalled);
265                 rds_ib_ack_send_complete(ic);
266                 return;
267         }
268
269         oldest = rds_ib_ring_oldest(&ic->i_send_ring);
270
271         completed = rds_ib_ring_completed(&ic->i_send_ring,
272                                           (wc->wr_id & ~RDS_IB_SEND_OP),
273                                           oldest);
274
275         for (i = 0; i < completed; i++) {
276                 send = &ic->i_sends[oldest];
277                 if (send->s_wr.send_flags & IB_SEND_SIGNALED)
278                         nr_sig++;
279
280                 rm = rds_ib_send_unmap_op(ic, send, wc->status);
281
282                 if (time_after(jiffies, send->s_queued + HZ / 2))
283                         rds_ib_stats_inc(s_ib_tx_stalled);
284
285                 if (send->s_op) {
286                         if (send->s_op == rm->m_final_op) {
287                                 /* If anyone waited for this message to get
288                                  * flushed out, wake them up now
289                                  */
290                                 rds_message_unmapped(rm);
291                         }
292                         rds_message_put(rm);
293                         send->s_op = NULL;
294                 }
295
296                 oldest = (oldest + 1) % ic->i_send_ring.w_nr;
297         }
298
299         rds_ib_ring_free(&ic->i_send_ring, completed);
300         rds_ib_sub_signaled(ic, nr_sig);
301         nr_sig = 0;
302
303         if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
304             test_bit(0, &conn->c_map_queued))
305                 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
306
307         /* We expect errors as the qp is drained during shutdown */
308         if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
309                 rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
310                                   &conn->c_faddr, wc->status,
311                                   ib_wc_status_msg(wc->status));
312         }
313 }
314
315 /*
316  * This is the main function for allocating credits when sending
317  * messages.
318  *
319  * Conceptually, we have two counters:
320  *  -   send credits: this tells us how many WRs we're allowed
321  *      to submit without overrunning the receiver's queue. For
322  *      each SEND WR we post, we decrement this by one.
323  *
324  *  -   posted credits: this tells us how many WRs we recently
325  *      posted to the receive queue. This value is transferred
326  *      to the peer as a "credit update" in an RDS header field.
327  *      Every time we transmit credits to the peer, we subtract
328  *      the amount of transferred credits from this counter.
329  *
330  * It is essential that we avoid situations where both sides have
331  * exhausted their send credits, and are unable to send new credits
332  * to the peer. We achieve this by requiring that we send at least
333  * one credit update to the peer before exhausting our credits.
334  * When new credits arrive, we subtract one credit that is withheld
335  * until we've posted new buffers and are ready to transmit these
336  * credits (see rds_ib_send_add_credits below).
337  *
338  * The RDS send code is essentially single-threaded; rds_send_xmit
339  * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
340  * However, the ACK sending code is independent and can race with
341  * message SENDs.
342  *
343  * In the send path, we need to update the counters for send credits
344  * and the counter of posted buffers atomically - when we use the
345  * last available credit, we cannot allow another thread to race us
346  * and grab the posted credits counter.  Hence, we have to use a
347  * spinlock to protect the credit counter, or use atomics.
348  *
349  * Spinlocks shared between the send and the receive path are bad,
350  * because they create unnecessary delays. An early implementation
351  * using a spinlock showed a 5% degradation in throughput at some
352  * loads.
353  *
354  * This implementation avoids spinlocks completely, putting both
355  * counters into a single atomic, and updating that atomic using
356  * atomic_add (in the receive path, when receiving fresh credits),
357  * and using atomic_cmpxchg when updating the two counters.
358  */
359 int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
360                              u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
361 {
362         unsigned int avail, posted, got = 0, advertise;
363         long oldval, newval;
364
365         *adv_credits = 0;
366         if (!ic->i_flowctl)
367                 return wanted;
368
369 try_again:
370         advertise = 0;
371         oldval = newval = atomic_read(&ic->i_credits);
372         posted = IB_GET_POST_CREDITS(oldval);
373         avail = IB_GET_SEND_CREDITS(oldval);
374
375         rdsdebug("wanted=%u credits=%u posted=%u\n",
376                         wanted, avail, posted);
377
378         /* The last credit must be used to send a credit update. */
379         if (avail && !posted)
380                 avail--;
381
382         if (avail < wanted) {
383                 struct rds_connection *conn = ic->i_cm_id->context;
384
385                 /* Oops, there aren't that many credits left! */
386                 set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
387                 got = avail;
388         } else {
389                 /* Sometimes you get what you want, lalala. */
390                 got = wanted;
391         }
392         newval -= IB_SET_SEND_CREDITS(got);
393
394         /*
395          * If need_posted is non-zero, the caller wants the posted credits
396          * advertised regardless of whether any send credits are
397          * available.
398          */
399         if (posted && (got || need_posted)) {
400                 advertise = min_t(unsigned int, posted, max_posted);
401                 newval -= IB_SET_POST_CREDITS(advertise);
402         }
403
404         /* Finally bill everything */
405         if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
406                 goto try_again;
407
408         *adv_credits = advertise;
409         return got;
410 }
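
/*
 * Illustrative sketch (editor's addition, not part of the RDS code): the
 * counter packing used by rds_ib_send_grab_credits() above, in isolation.
 * Two 16-bit counters share one atomic_t - send credits in the low half,
 * posted credits in the high half - and a consumer takes send credits with
 * an atomic_cmpxchg() retry loop, while the receive path refills them with
 * a plain atomic_add(), so no spinlock is shared between the two paths.
 * The helper name is hypothetical; the real code uses the
 * IB_GET_SEND_CREDITS() and IB_SET_POST_CREDITS() style macros from "ib.h".
 */
static inline unsigned int example_take_send_credits(atomic_t *credits,
                                                     unsigned int wanted)
{
        int oldval, newval;
        unsigned int avail, got;

        do {
                oldval = atomic_read(credits);
                avail = oldval & 0xffff;        /* send credits: low 16 bits */
                got = min(wanted, avail);
                /* posted credits (high 16 bits) are left untouched here */
                newval = oldval - got;
        } while (atomic_cmpxchg(credits, oldval, newval) != oldval);

        return got;
}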
411
412 void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
413 {
414         struct rds_ib_connection *ic = conn->c_transport_data;
415
416         if (credits == 0)
417                 return;
418
419         rdsdebug("credits=%u current=%u%s\n",
420                         credits,
421                         IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
422                         test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
423
424         atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
425         if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
426                 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
427
428         WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
429
430         rds_ib_stats_inc(s_ib_rx_credit_updates);
431 }
432
433 void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
434 {
435         struct rds_ib_connection *ic = conn->c_transport_data;
436
437         if (posted == 0)
438                 return;
439
440         atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
441
442         /* Decide whether to send an update to the peer now.
443          * If we would send a credit update for every single buffer we
444          * post, we would end up with an ACK storm (ACK arrives,
445          * consumes buffer, we refill the ring, send ACK to remote
446          * advertising the newly posted buffer... ad inf)
447          *
448          * Performance pretty much depends on how often we send
449          * credit updates - too frequent updates mean lots of ACKs.
450          * Too infrequent updates, and the peer will run out of
451          * credits and have to throttle.
452          * For the time being, 16 seems to be a good compromise.
453          */
454         if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
455                 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
456 }
457
458 static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
459                                              struct rds_ib_send_work *send,
460                                              bool notify)
461 {
462         /*
463          * We want to delay signaling completions just enough to get
464          * the batching benefits but not so much that we create dead time
465          * on the wire.
466          */
467         if (ic->i_unsignaled_wrs-- == 0 || notify) {
468                 ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
469                 send->s_wr.send_flags |= IB_SEND_SIGNALED;
470                 return 1;
471         }
472         return 0;
473 }
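
/*
 * Illustrative sketch (editor's addition): the completion-batching policy
 * of rds_ib_set_wr_signal_state() above, in isolation.  A signaled
 * completion is requested only when the unsignaled-WR budget runs out, or
 * when the caller explicitly asks to be notified, so the send completion
 * queue only ever sees a fraction of the posted work requests.  The helper
 * name and the by-reference budget are hypothetical; the real code keeps
 * the countdown in ic->i_unsignaled_wrs and refills it from
 * rds_ib_sysctl_max_unsig_wrs.
 */
static inline bool example_should_signal_wr(unsigned int *budget,
                                            unsigned int max_unsig,
                                            bool notify)
{
        if ((*budget)-- == 0 || notify) {
                *budget = max_unsig;
                return true;
        }
        return false;
}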
474
475 /*
476  * This can be called multiple times for a given message.  The first time
477  * we see a message we map its scatterlist into the IB device so that
478  * we can provide that mapped address to the IB scatter gather entries
479  * in the IB work requests.  We translate the scatterlist into a series
480  * of work requests that fragment the message.  These work requests complete
481  * in order so we pass ownership of the message to the completion handler
482  * once we send the final fragment.
483  *
484  * The RDS core uses the c_send_lock to only enter this function once
485  * per connection.  This makes sure that the tx ring alloc/unalloc pairs
486  * don't get out of sync and confuse the ring.
487  */
488 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
489                 unsigned int hdr_off, unsigned int sg, unsigned int off)
490 {
491         struct rds_ib_connection *ic = conn->c_transport_data;
492         struct ib_device *dev = ic->i_cm_id->device;
493         struct rds_ib_send_work *send = NULL;
494         struct rds_ib_send_work *first;
495         struct rds_ib_send_work *prev;
496         struct ib_send_wr *failed_wr;
497         struct scatterlist *scat;
498         u32 pos;
499         u32 i;
500         u32 work_alloc;
501         u32 credit_alloc = 0;
502         u32 posted;
503         u32 adv_credits = 0;
504         int send_flags = 0;
505         int bytes_sent = 0;
506         int ret;
507         int flow_controlled = 0;
508         int nr_sig = 0;
509
510         BUG_ON(off % RDS_FRAG_SIZE);
511         BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
512
513         /* Do not send cong updates to IB loopback */
514         if (conn->c_loopback
515             && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
516                 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
517                 scat = &rm->data.op_sg[sg];
518                 ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
519                 return sizeof(struct rds_header) + ret;
520         }
521
522         /* FIXME we may overallocate here */
523         if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
524                 i = 1;
525         else
526                 i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
527
528         work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
529         if (work_alloc == 0) {
530                 set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
531                 rds_ib_stats_inc(s_ib_tx_ring_full);
532                 ret = -ENOMEM;
533                 goto out;
534         }
535
536         if (ic->i_flowctl) {
537                 credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
538                 adv_credits += posted;
539                 if (credit_alloc < work_alloc) {
540                         rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
541                         work_alloc = credit_alloc;
542                         flow_controlled = 1;
543                 }
544                 if (work_alloc == 0) {
545                         set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
546                         rds_ib_stats_inc(s_ib_tx_throttle);
547                         ret = -ENOMEM;
548                         goto out;
549                 }
550         }
551
552         /* map the message the first time we see it */
553         if (!ic->i_data_op) {
554                 if (rm->data.op_nents) {
555                         rm->data.op_count = ib_dma_map_sg(dev,
556                                                           rm->data.op_sg,
557                                                           rm->data.op_nents,
558                                                           DMA_TO_DEVICE);
559                         rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
560                         if (rm->data.op_count == 0) {
561                                 rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
562                                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
563                                 ret = -ENOMEM; /* XXX ? */
564                                 goto out;
565                         }
566                 } else {
567                         rm->data.op_count = 0;
568                 }
569
570                 rds_message_addref(rm);
571                 rm->data.op_dmasg = 0;
572                 rm->data.op_dmaoff = 0;
573                 ic->i_data_op = &rm->data;
574
575                 /* Finalize the header */
576                 if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
577                         rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
578                 if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
579                         rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
580
581                 /* If it has an RDMA op, tell the peer we did it. This is
582                  * used by the peer to release use-once RDMA MRs. */
583                 if (rm->rdma.op_active) {
584                         struct rds_ext_header_rdma ext_hdr;
585
586                         ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
587                         rds_message_add_extension(&rm->m_inc.i_hdr,
588                                         RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
589                 }
590                 if (rm->m_rdma_cookie) {
591                         rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
592                                         rds_rdma_cookie_key(rm->m_rdma_cookie),
593                                         rds_rdma_cookie_offset(rm->m_rdma_cookie));
594                 }
595
596                 /* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
597                  * we should not do this unless we have a chance of at least
598                  * sticking the header into the send ring. Which is why we
599                  * should call rds_ib_ring_alloc first. */
600                 rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
601                 rds_message_make_checksum(&rm->m_inc.i_hdr);
602
603                 /*
604                  * Update adv_credits since we reset the ACK_REQUIRED bit.
605                  */
606                 if (ic->i_flowctl) {
607                         rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
608                         adv_credits += posted;
609                         BUG_ON(adv_credits > 255);
610                 }
611         }
612
613         /* Sometimes you want to put a fence between an RDMA
614          * READ and the following SEND.
615          * We could either do this all the time
616          * or when requested by the user. Right now, we let
617          * the application choose.
618          */
619         if (rm->rdma.op_active && rm->rdma.op_fence)
620                 send_flags = IB_SEND_FENCE;
621
622         /* Each frag gets a header. Msgs may be 0 bytes */
623         send = &ic->i_sends[pos];
624         first = send;
625         prev = NULL;
626         scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
627         i = 0;
628         do {
629                 unsigned int len = 0;
630
631                 /* Set up the header */
632                 send->s_wr.send_flags = send_flags;
633                 send->s_wr.opcode = IB_WR_SEND;
634                 send->s_wr.num_sge = 1;
635                 send->s_wr.next = NULL;
636                 send->s_queued = jiffies;
637                 send->s_op = NULL;
638
639                 send->s_sge[0].addr = ic->i_send_hdrs_dma
640                         + (pos * sizeof(struct rds_header));
641                 send->s_sge[0].length = sizeof(struct rds_header);
642
643                 memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
644
645                 /* Set up the data, if present */
646                 if (i < work_alloc
647                     && scat != &rm->data.op_sg[rm->data.op_count]) {
648                         len = min(RDS_FRAG_SIZE,
649                                 ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
650                         send->s_wr.num_sge = 2;
651
652                         send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
653                         send->s_sge[1].addr += rm->data.op_dmaoff;
654                         send->s_sge[1].length = len;
655
656                         bytes_sent += len;
657                         rm->data.op_dmaoff += len;
658                         if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
659                                 scat++;
660                                 rm->data.op_dmasg++;
661                                 rm->data.op_dmaoff = 0;
662                         }
663                 }
664
665                 rds_ib_set_wr_signal_state(ic, send, 0);
666
667                 /*
668                  * Always signal the last one if we're stopping due to flow control.
669                  */
670                 if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
671                         send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
672
673                 if (send->s_wr.send_flags & IB_SEND_SIGNALED)
674                         nr_sig++;
675
676                 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
677                          &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
678
679                 if (ic->i_flowctl && adv_credits) {
680                         struct rds_header *hdr = &ic->i_send_hdrs[pos];
681
682                         /* add credit and redo the header checksum */
683                         hdr->h_credit = adv_credits;
684                         rds_message_make_checksum(hdr);
685                         adv_credits = 0;
686                         rds_ib_stats_inc(s_ib_tx_credit_updates);
687                 }
688
689                 if (prev)
690                         prev->s_wr.next = &send->s_wr;
691                 prev = send;
692
693                 pos = (pos + 1) % ic->i_send_ring.w_nr;
694                 send = &ic->i_sends[pos];
695                 i++;
696
697         } while (i < work_alloc
698                  && scat != &rm->data.op_sg[rm->data.op_count]);
699
700         /* Account the RDS header in the number of bytes we sent, but just once.
701          * The caller has no concept of fragmentation. */
702         if (hdr_off == 0)
703                 bytes_sent += sizeof(struct rds_header);
704
705         /* if we finished the message then send completion owns it */
706         if (scat == &rm->data.op_sg[rm->data.op_count]) {
707                 prev->s_op = ic->i_data_op;
708                 prev->s_wr.send_flags |= IB_SEND_SOLICITED;
709                 if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
710                         ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
711                         prev->s_wr.send_flags |= IB_SEND_SIGNALED;
712                         nr_sig++;
713                 }
714                 ic->i_data_op = NULL;
715         }
716
717         /* Put back wrs & credits we didn't use */
718         if (i < work_alloc) {
719                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
720                 work_alloc = i;
721         }
722         if (ic->i_flowctl && i < credit_alloc)
723                 rds_ib_send_add_credits(conn, credit_alloc - i);
724
725         if (nr_sig)
726                 atomic_add(nr_sig, &ic->i_signaled_sends);
727
728         /* XXX need to worry about failed_wr and partial sends. */
729         failed_wr = &first->s_wr;
730         ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
731         rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
732                  first, &first->s_wr, ret, failed_wr);
733         BUG_ON(failed_wr != &first->s_wr);
734         if (ret) {
735                 printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
736                        "returned %d\n", &conn->c_faddr, ret);
737                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
738                 rds_ib_sub_signaled(ic, nr_sig);
739                 if (prev->s_op) {
740                         ic->i_data_op = prev->s_op;
741                         prev->s_op = NULL;
742                 }
743
744                 rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
745                 goto out;
746         }
747
748         ret = bytes_sent;
749 out:
750         BUG_ON(adv_credits);
751         return ret;
752 }
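
/*
 * Illustrative sketch (editor's addition): how many send work requests
 * rds_ib_xmit() above asks the send ring for.  Each work request carries
 * one RDS_FRAG_SIZE fragment of payload plus a copy of the RDS header, and
 * a zero-length message still consumes one work request for the header
 * alone.  The function name is hypothetical; the real code computes the
 * same value with the ceil() helper from "rds.h", and DIV_ROUND_UP() is
 * the generic kernel equivalent.
 */
static inline u32 example_send_wrs_for_payload(u32 payload_len)
{
        return payload_len ? DIV_ROUND_UP(payload_len, RDS_FRAG_SIZE) : 1;
}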
753
754 /*
755  * Issue an atomic operation.
756  * A simplified version of the rdma case: we always map one SG entry, of
757  * only 8 bytes, for the return value from the atomic operation.
758  */
759 int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
760 {
761         struct rds_ib_connection *ic = conn->c_transport_data;
762         struct rds_ib_send_work *send = NULL;
763         struct ib_send_wr *failed_wr;
764         struct rds_ib_device *rds_ibdev;
765         u32 pos;
766         u32 work_alloc;
767         int ret;
768         int nr_sig = 0;
769
770         rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
771
772         work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
773         if (work_alloc != 1) {
774                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
775                 rds_ib_stats_inc(s_ib_tx_ring_full);
776                 ret = -ENOMEM;
777                 goto out;
778         }
779
780         /* address of send request in ring */
781         send = &ic->i_sends[pos];
782         send->s_queued = jiffies;
783
784         if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
785                 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
786                 send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
787                 send->s_atomic_wr.swap = op->op_m_cswp.swap;
788                 send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
789                 send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
790         } else { /* FADD */
791                 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
792                 send->s_atomic_wr.compare_add = op->op_m_fadd.add;
793                 send->s_atomic_wr.swap = 0;
794                 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
795                 send->s_atomic_wr.swap_mask = 0;
796         }
797         nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
798         send->s_atomic_wr.wr.num_sge = 1;
799         send->s_atomic_wr.wr.next = NULL;
800         send->s_atomic_wr.remote_addr = op->op_remote_addr;
801         send->s_atomic_wr.rkey = op->op_rkey;
802         send->s_op = op;
803         rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
804
805         /* map 8 byte retval buffer to the device */
806         ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
807         rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
808         if (ret != 1) {
809                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
810                 rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
811                 ret = -ENOMEM; /* XXX ? */
812                 goto out;
813         }
814
815         /* Convert our struct scatterlist to struct ib_sge */
816         send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
817         send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
818         send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
819
820         rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
821                  send->s_sge[0].addr, send->s_sge[0].length);
822
823         if (nr_sig)
824                 atomic_add(nr_sig, &ic->i_signaled_sends);
825
826         failed_wr = &send->s_atomic_wr.wr;
827         ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
828         rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
829                  send, &send->s_atomic_wr, ret, failed_wr);
830         BUG_ON(failed_wr != &send->s_atomic_wr.wr);
831         if (ret) {
832                 printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
833                        "returned %d\n", &conn->c_faddr, ret);
834                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
835                 rds_ib_sub_signaled(ic, nr_sig);
836                 goto out;
837         }
838
839         if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
840                 printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
841                 BUG_ON(failed_wr != &send->s_atomic_wr.wr);
842         }
843
844 out:
845         return ret;
846 }
847
848 int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
849 {
850         struct rds_ib_connection *ic = conn->c_transport_data;
851         struct rds_ib_send_work *send = NULL;
852         struct rds_ib_send_work *first;
853         struct rds_ib_send_work *prev;
854         struct ib_send_wr *failed_wr;
855         struct scatterlist *scat;
856         unsigned long len;
857         u64 remote_addr = op->op_remote_addr;
858         u32 max_sge = ic->rds_ibdev->max_sge;
859         u32 pos;
860         u32 work_alloc;
861         u32 i;
862         u32 j;
863         int sent;
864         int ret;
865         int num_sge;
866         int nr_sig = 0;
867
868         /* map the op the first time we see it */
869         if (!op->op_mapped) {
870                 op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
871                                              op->op_sg, op->op_nents, (op->op_write) ?
872                                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
873                 rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
874                 if (op->op_count == 0) {
875                         rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
876                         ret = -ENOMEM; /* XXX ? */
877                         goto out;
878                 }
879
880                 op->op_mapped = 1;
881         }
882
883         /*
884          * Instead of knowing how to return a partial rdma read/write we insist that there
885          * be enough work requests to send the entire message.
886          */
887         i = ceil(op->op_count, max_sge);
888
889         work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
890         if (work_alloc != i) {
891                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
892                 rds_ib_stats_inc(s_ib_tx_ring_full);
893                 ret = -ENOMEM;
894                 goto out;
895         }
896
897         send = &ic->i_sends[pos];
898         first = send;
899         prev = NULL;
900         scat = &op->op_sg[0];
901         sent = 0;
902         num_sge = op->op_count;
903
904         for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
905                 send->s_wr.send_flags = 0;
906                 send->s_queued = jiffies;
907                 send->s_op = NULL;
908
909                 nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
910
911                 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
912                 send->s_rdma_wr.remote_addr = remote_addr;
913                 send->s_rdma_wr.rkey = op->op_rkey;
914
915                 if (num_sge > max_sge) {
916                         send->s_rdma_wr.wr.num_sge = max_sge;
917                         num_sge -= max_sge;
918                 } else {
919                         send->s_rdma_wr.wr.num_sge = num_sge;
920                 }
921
922                 send->s_rdma_wr.wr.next = NULL;
923
924                 if (prev)
925                         prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;
926
927                 for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
928                      scat != &op->op_sg[op->op_count]; j++) {
929                         len = ib_sg_dma_len(ic->i_cm_id->device, scat);
930                         send->s_sge[j].addr =
931                                  ib_sg_dma_address(ic->i_cm_id->device, scat);
932                         send->s_sge[j].length = len;
933                         send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
934
935                         sent += len;
936                         rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
937
938                         remote_addr += len;
939                         scat++;
940                 }
941
942                 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
943                         &send->s_rdma_wr.wr,
944                         send->s_rdma_wr.wr.num_sge,
945                         send->s_rdma_wr.wr.next);
946
947                 prev = send;
948                 if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
949                         send = ic->i_sends;
950         }
951
952         /* give a reference to the last op */
953         if (scat == &op->op_sg[op->op_count]) {
954                 prev->s_op = op;
955                 rds_message_addref(container_of(op, struct rds_message, rdma));
956         }
957
958         if (i < work_alloc) {
959                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
960                 work_alloc = i;
961         }
962
963         if (nr_sig)
964                 atomic_add(nr_sig, &ic->i_signaled_sends);
965
966         failed_wr = &first->s_rdma_wr.wr;
967         ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
968         rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
969                  first, &first->s_rdma_wr.wr, ret, failed_wr);
970         BUG_ON(failed_wr != &first->s_rdma_wr.wr);
971         if (ret) {
972                 printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
973                        "returned %d\n", &conn->c_faddr, ret);
974                 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
975                 rds_ib_sub_signaled(ic, nr_sig);
976                 goto out;
977         }
978
979         if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
980                 printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
981                 BUG_ON(failed_wr != &first->s_rdma_wr.wr);
982         }
983
984
985 out:
986         return ret;
987 }
988
989 void rds_ib_xmit_complete(struct rds_connection *conn)
990 {
991         struct rds_ib_connection *ic = conn->c_transport_data;
992
993         /* We may have a pending ACK or window update we were unable
994          * to send previously (due to flow control). Try again. */
995         rds_ib_attempt_ack(ic);
996 }