// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Work requests (WR) of type ib_post_send or ib_post_recv respectively
 * are submitted to either RC SQ or RC RQ respectively
 * (reliably connected send/receive queue)
 * and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ) respectively,
 * we get completion queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Steffen Maier <maier@linux.vnet.ibm.com>
 */
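
/* Overview of the send path described above (sketch, for orientation only;
 * all functions named here are defined in this file):
 *
 *	caller context			tasklet (bottom half)
 *	--------------			---------------------
 *	smc_wr_tx_get_free_slot()	smc_wr_tx_cq_handler() (IRQ)
 *	  -> claim slot, build msg	  -> tasklet_schedule()
 *	smc_wr_tx_send()		smc_wr_tx_tasklet_fn()
 *	  -> ib_post_send()		  -> ib_poll_cq()
 *					  -> smc_wr_tx_process_cqe()
 *					     -> pend->handler(), free slot
 */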
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"

#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);

struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64			wr_id;		/* work request id sent */
	smc_wr_tx_handler	handler;
	enum ib_wc_status	wc_status;	/* CQE status */
	struct smc_link		*link;
	u32			idx;		/* index into the wr_tx_* arrays */
	struct smc_wr_tx_pend_priv priv;
	u8			compl_requested; /* wake wr_tx_compl on CQE */
};

/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/

/* returns true if at least one tx work request is pending on the given link */
static inline bool smc_wr_is_tx_pend(struct smc_link *link)
{
	return find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
							link->wr_tx_cnt;
}

/* wait till all pending tx work requests on the given link are completed */
int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
	if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
			       SMC_WR_TX_WAIT_PENDING_TIME))
		return 0;
	else /* timeout */
		return -EPIPE;
}

static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;	/* not found */
}

static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;
	int i;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		smc_wr_wakeup_reg_wait(link);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt)
		return;
	link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
	if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
		complete(&link->wr_tx_compl[pnd_snd_idx]);
	memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
	/* clear the full struct smc_wr_tx_pend including .priv */
	memset(&link->wr_tx_pends[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_pends[pnd_snd_idx]));
	memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
	if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
		return;
	if (wc->status) {
		/* a failed send poisons the link: drop all other pending
		 * slots as well and schedule link termination
		 */
		for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
			/* clear full struct smc_wr_tx_pend including .priv */
			memset(&link->wr_tx_pends[i], 0,
			       sizeof(link->wr_tx_pends[i]));
			memset(&link->wr_tx_bufs[i], 0,
			       sizeof(link->wr_tx_bufs[i]));
			clear_bit(i, link->wr_tx_mask);
		}
		/* terminate link */
		smcr_link_down_cond_sched(link);
	}
	if (pnd_snd.handler)
		pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}

static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
{
	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i = 0, rc;
	int polled = 0;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1)
			ib_req_notify_cq(dev->roce_cq_send,
					 IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS);
		if (!rc)
			break;
		for (i = 0; i < rc; i++)
			smc_wr_tx_process_cqe(&wc[i]);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}
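
/* IRQ-context completion callback: do no real work here, just kick the
 * bottom half. All CQE processing happens in smc_wr_tx_tasklet_fn() above,
 * which re-arms the CQ (IB_CQ_REPORT_MISSED_EVENTS) and polls one extra
 * round to close the race with completions arriving in between.
 */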
void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}

/*---------------------------- request submission ---------------------------*/

static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	if (!smc_link_usable(link))
		return -ENOLINK;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}

/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *			and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_rdma_buf:	Out value returns pointer to rdma work request.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wr_rdma_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_wr_tx_pend *wr_pend;
	u32 idx = link->wr_tx_cnt;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq() || lgr->terminating) {
		/* cannot sleep here: try once and give up on -EBUSY */
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		rc = wait_event_interruptible_timeout(
			link->wr_tx_wait,
			!smc_link_usable(link) ||
			lgr->terminating ||
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate link */
			smcr_link_down_cond_sched(link);
			return -EPIPE;
		}
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	if (wr_rdma_buf)
		*wr_rdma_buf = &link->wr_tx_rdmas[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}
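
/* Typical usage (sketch, simplified from how the CDC/LLC layers send
 * messages; the handler name is illustrative only):
 *
 *	struct smc_wr_tx_pend_priv *pend;
 *	struct smc_wr_buf *wr_buf;
 *	int rc;
 *
 *	rc = smc_wr_tx_get_free_slot(link, my_tx_handler, &wr_buf,
 *				     NULL, &pend);
 *	if (rc)
 *		return rc;
 *	// ... assemble the message in *wr_buf ...
 *	rc = smc_wr_tx_send(link, pend); // posts the WR; on failure the
 *					 // slot is returned automatically
 */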

int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		u32 idx = pend->idx;

		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[idx], 0,
		       sizeof(link->wr_tx_pends[idx]));
		memset(&link->wr_tx_bufs[idx], 0,
		       sizeof(link->wr_tx_bufs[idx]));
		test_and_clear_bit(idx, link->wr_tx_mask);
		wake_up(&link->wr_tx_wait);
		return 1;
	}

	return 0;
}

/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

/* Send prepared WR slot via ib_post_send and wait for send completion
 * notification.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	pend->compl_requested = 1;
	init_completion(&link->wr_tx_compl[pend->idx]);

	rc = smc_wr_tx_send(link, priv);
	if (rc)
		return rc;
	/* wait for completion by smc_wr_tx_process_cqe() */
	rc = wait_for_completion_interruptible_timeout(
					&link->wr_tx_compl[pend->idx], timeout);
	if (rc <= 0)
		rc = -ENODATA;
	if (rc > 0)
		rc = 0;
	return rc;
}

/* Register a memory region and wait for result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
	if (rc)
		return rc;

	atomic_inc(&link->wr_reg_refcnt);
	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (atomic_dec_and_test(&link->wr_reg_refcnt))
		wake_up_all(&link->wr_reg_wait);
	if (!rc) {
		/* timeout - terminate link */
		smcr_link_down_cond_sched(link);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -EINTR;
	switch (link->wr_reg_state) {
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}

void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
			     smc_wr_tx_filter filter,
			     smc_wr_tx_dismisser dismisser,
			     unsigned long data)
{
	struct smc_wr_tx_pend_priv *tx_pend;
	struct smc_wr_rx_hdr *wr_tx;
	int i;

	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
		wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
		if (wr_tx->type != wr_tx_hdr_type)
			continue;
		tx_pend = &link->wr_tx_pends[i].priv;
		if (filter(tx_pend, data))
			dismisser(tx_pend);
	}
}

/****************************** receive queue ********************************/

int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}
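
/* Example registration (sketch; the handler type value and function name
 * are illustrative, real registrants are the LLC and CDC layers):
 *
 *	static struct smc_wr_rx_handler my_rx_handler = {
 *		.handler	= my_rx_fn,	// void (*)(struct ib_wc *, void *)
 *		.type		= MY_MSG_TYPE,	// first byte of the message
 *	};
 *
 *	rc = smc_wr_rx_register_handler(&my_rx_handler);
 */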

/* Demultiplex a received work request based on the message type to its handler.
 * Relies on smc_wr_rx_hash having been completely filled before any IB WRs,
 * and not being modified anymore afterwards, so we don't need to lock it.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
	temp_wr_id = wc->wr_id;
	/* rx wr_ids are assigned sequentially as receive buffers are posted,
	 * so the remainder of wr_id / wr_rx_cnt is the ring buffer index
	 */
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}

static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			link->wr_rx_tstamp = jiffies;
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				/* terminate link */
				smcr_link_down_cond_sched(link);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}

static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
{
	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int polled = 0;
	int rc;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1)
			ib_req_notify_cq(dev->roce_cq_recv,
					 IB_CQ_SOLICITED_MASK
					 | IB_CQ_REPORT_MISSED_EVENTS);
		if (!rc)
			break;
		smc_wr_rx_process_cqes(&wc[0], rc);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}
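
/* The two CQ callbacks above are wired up when the completion queues are
 * created, roughly like this (sketch only; the actual calls live in
 * smc_ib.c and size the CQs from the device attributes):
 *
 *	struct ib_cq_init_attr cqattr = { .cqe = SMC_MAX_CQE };
 *
 *	smcibdev->roce_cq_send = ib_create_cq(ibdev, smc_wr_tx_cq_handler,
 *					      NULL, smcibdev, &cqattr);
 *	smcibdev->roce_cq_recv = ib_create_cq(ibdev, smc_wr_rx_cq_handler,
 *					      NULL, smcibdev, &cqattr);
 */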

int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}

/***************************** init, exit, misc ******************************/

void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));
	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_PKEY_INDEX |
		    IB_QP_PORT | IB_QP_QKEY | IB_QP_AV | IB_QP_PATH_MTU |
		    IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
		    IB_QP_RQ_PSN | IB_QP_MAX_QP_RD_ATOMIC | IB_QP_ALT_PATH |
		    IB_QP_MIN_RNR_TIMER | IB_QP_SQ_PSN |
		    IB_QP_PATH_MIG_STATE | IB_QP_CAP | IB_QP_DEST_QPN,
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}

static void smc_wr_init_sge(struct smc_link *lnk)
{
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
	}
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		lnk->wr_rx_sges[i].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
		lnk->wr_rx_ibs[i].num_sge = 1;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}
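
/* Teardown of a link's WR resources; the ordering below matters: wake up
 * all waiters first, then wait until pending sends and in-flight MR
 * registrations have drained (wr_tx_refcnt/wr_reg_refcnt reach zero), and
 * only then unmap the DMA buffers the device may still be accessing.
 */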
void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	if (!lnk->smcibdev)
		return;
	ibdev = lnk->smcibdev->ibdev;

	smc_wr_wakeup_reg_wait(lnk);
	smc_wr_wakeup_tx_wait(lnk);

	if (smc_wr_tx_wait_no_pending_sends(lnk))
		memset(lnk->wr_tx_mask, 0,
		       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
						sizeof(*lnk->wr_tx_mask));
	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
}

void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_compl);
	lnk->wr_tx_compl = NULL;
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	kfree(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_tx_rdma_sges);
	lnk->wr_tx_rdma_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_tx_rdmas);
	lnk->wr_tx_rdmas = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}

int smc_wr_alloc_link_mem(struct smc_link *link)
{
	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		goto no_mem;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
	link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_rdmas[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_rdmas)
		goto no_mem_wr_rx_ibs;
	link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
					sizeof(link->wr_tx_rdma_sges[0]),
					GFP_KERNEL);
	if (!link->wr_tx_rdma_sges)
		goto no_mem_wr_tx_rdmas;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_tx_rdma_sges;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
				   sizeof(*link->wr_tx_mask),
				   GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_compl[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_compl)
		goto no_mem_wr_tx_pends;
	return 0;

no_mem_wr_tx_pends:
	kfree(link->wr_tx_pends);
no_mem_wr_tx_mask:
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_tx_rdma_sges:
	kfree(link->wr_tx_rdma_sges);
no_mem_wr_tx_rdmas:
	kfree(link->wr_tx_rdmas);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
no_mem:
	return -ENOMEM;
}

void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}

void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_setup(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn);
	tasklet_setup(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn);
}

int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_id = 0;
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
	init_waitqueue_head(&lnk->wr_tx_wait);
	atomic_set(&lnk->wr_tx_refcnt, 0);
	init_waitqueue_head(&lnk->wr_reg_wait);
	atomic_set(&lnk->wr_reg_refcnt, 0);
	return rc;

dma_unmap:
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}