// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Work requests (WR) of type ib_post_send or ib_post_recv respectively
 * are submitted to either RC SQ or RC RQ respectively
 * (reliably connected send/receive queue)
 * and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ) respectively,
 * we get completion queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Steffen Maier <maier@linux.vnet.ibm.com>
 */
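
/* Illustrative sketch (not part of the original file): the typical life
 * cycle of a send WR at this layer, assuming a caller that already holds a
 * usable link and a completion handler "my_handler" (hypothetical name):
 *
 *	struct smc_wr_tx_pend_priv *pend;
 *	struct smc_wr_buf *wr_buf;
 *	int rc;
 *
 *	rc = smc_wr_tx_get_free_slot(link, my_handler, &wr_buf, NULL, &pend);
 *	if (rc)
 *		return rc;		// no free slot or link not sendable
 *	// ... assemble the message in *wr_buf ...
 *	rc = smc_wr_tx_send(link, pend);	// posts to the RC SQ
 *	// the send CQ tasklet later invokes my_handler() with the WC status
 */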

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"

#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);

struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64			wr_id;		/* work request id sent */
	smc_wr_tx_handler	handler;
	enum ib_wc_status	wc_status;	/* CQE status */
	struct smc_link		*link;
	u32			idx;
	struct smc_wr_tx_pend_priv priv;
	u8			compl_requested;
};
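
/* Note (illustrative, not part of the original file): callers only ever see
 * the embedded .priv member as their handler context; this file recovers the
 * surrounding control structure with container_of(), e.g.:
 *
 *	struct smc_wr_tx_pend *pend =
 *		container_of(priv, struct smc_wr_tx_pend, priv);
 */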

/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/

/* returns true if at least one tx work request is pending on the given link */
static inline bool smc_wr_is_tx_pend(struct smc_link *link)
{
	if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
							link->wr_tx_cnt)
		return true;
	return false;
}

/* wait till all pending tx work requests on the given link are completed */
void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
	wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
}

static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;
}

static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		smc_wr_wakeup_reg_wait(link);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt)
		return;
	link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
	if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
		complete(&link->wr_tx_compl[pnd_snd_idx]);
	memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
	/* clear the full struct smc_wr_tx_pend including .priv */
	memset(&link->wr_tx_pends[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_pends[pnd_snd_idx]));
	memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
	if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
		return;
	if (wc->status) {
		/* terminate link */
		smcr_link_down_cond_sched(link);
	}
	if (pnd_snd.handler)
		pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}

static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
{
	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i = 0, rc;
	int polled = 0;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_send,
					 IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		for (i = 0; i < rc; i++)
			smc_wr_tx_process_cqe(&wc[i]);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}
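
/* Note on the poll loop above (descriptive, not part of the original file):
 * the CQ is re-armed with ib_req_notify_cq() during the first drain pass
 * only, and the whole pass is then repeated once ("goto again"). The second
 * pass catches CQEs that slipped in around the re-arm, closing the race
 * between the last empty ib_poll_cq() and the notification request.
 */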

void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}

/*---------------------------- request submission ---------------------------*/

static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	if (!smc_link_sendable(link))
		return -ENOLINK;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}

/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *			and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_rdma_buf:	Out value returns pointer to rdma work request.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wr_rdma_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_wr_tx_pend *wr_pend;
	u32 idx = link->wr_tx_cnt;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq() || lgr->terminating) {
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		rc = wait_event_interruptible_timeout(
			link->wr_tx_wait,
			!smc_link_sendable(link) ||
			lgr->terminating ||
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate link */
			smcr_link_down_cond_sched(link);
			return -EPIPE;
		}
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	if (wr_rdma_buf)
		*wr_rdma_buf = &link->wr_tx_rdmas[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}
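
/* Usage note (illustrative, not part of the original file): a caller that
 * obtained a slot but then decides not to post it must hand the slot back,
 * otherwise it stays marked busy in wr_tx_mask until link termination:
 *
 *	rc = smc_wr_tx_get_free_slot(link, handler, &wr_buf, NULL, &pend);
 *	if (!rc && assembly_failed)	// "assembly_failed" is hypothetical
 *		smc_wr_tx_put_slot(link, pend);
 */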

int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		u32 idx = pend->idx;

		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[idx], 0,
		       sizeof(link->wr_tx_pends[idx]));
		memset(&link->wr_tx_bufs[idx], 0,
		       sizeof(link->wr_tx_bufs[idx]));
		test_and_clear_bit(idx, link->wr_tx_mask);
		wake_up(&link->wr_tx_wait);
		return 1;
	}

	return 0;
}

/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

/* Send prepared WR slot via ib_post_send and wait for send completion
 * notification.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout)
{
	struct smc_wr_tx_pend *pend;
	u32 pnd_idx;
	int rc;

	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	pend->compl_requested = 1;
	pnd_idx = pend->idx;
	init_completion(&link->wr_tx_compl[pnd_idx]);

	rc = smc_wr_tx_send(link, priv);
	if (rc)
		return rc;
	/* wait for completion by smc_wr_tx_process_cqe() */
	rc = wait_for_completion_interruptible_timeout(
					&link->wr_tx_compl[pnd_idx], timeout);
	if (rc <= 0)
		rc = -ENODATA;
	if (rc > 0)
		rc = 0;
	return rc;
}

/* Register a memory region and wait for result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
	if (rc)
		return rc;

	atomic_inc(&link->wr_reg_refcnt);
	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (atomic_dec_and_test(&link->wr_reg_refcnt))
		wake_up_all(&link->wr_reg_wait);
	if (!rc) {
		/* timeout - terminate link */
		smcr_link_down_cond_sched(link);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -EINTR;
	switch (link->wr_reg_state) {
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}

/****************************** receive queue ********************************/

int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}
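
/* Illustrative sketch (not part of the original file): a message type is
 * registered once at init time, before any receive WRs are posted. The
 * handler function and type value below are hypothetical placeholders:
 *
 *	static struct smc_wr_rx_handler my_rx_handler = {
 *		.handler	= my_rx_fn,	// smc_wr_rx_handler callback
 *		.type		= MY_MSG_TYPE,	// first byte of the message
 *	};
 *
 *	rc = smc_wr_rx_register_handler(&my_rx_handler);
 */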

/* Demultiplex a received work request based on the message type to its handler.
 * Relies on smc_wr_rx_hash having been completely filled before any IB WRs,
 * and not being modified any more afterwards so we don't need to lock it.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
	temp_wr_id = wc->wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}
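
/* Worked example (not part of the original file): receive wr_ids increase
 * monotonically, and do_div() maps them back onto the ring of wr_rx_cnt
 * receive buffers by returning the remainder. With wr_rx_cnt == 48, a CQE
 * carrying wr_id == 50 yields index 50 % 48 == 2, i.e. the message sits in
 * link->wr_rx_bufs[2].
 */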

static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			link->wr_rx_tstamp = jiffies;
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				smcr_link_down_cond_sched(link);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}

static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
{
	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int polled = 0;
	int rc;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_recv,
					 IB_CQ_SOLICITED_MASK
					 | IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		smc_wr_rx_process_cqes(&wc[0], rc);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}

int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}

/***************************** init, exit, misc ******************************/

void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));
	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_STATE |
		    IB_QP_CUR_STATE |
		    IB_QP_PKEY_INDEX |
		    IB_QP_PORT |
		    IB_QP_QKEY |
		    IB_QP_AV |
		    IB_QP_PATH_MTU |
		    IB_QP_TIMEOUT |
		    IB_QP_RETRY_CNT |
		    IB_QP_RNR_RETRY |
		    IB_QP_RQ_PSN |
		    IB_QP_ALT_PATH |
		    IB_QP_MIN_RNR_TIMER |
		    IB_QP_SQ_PSN |
		    IB_QP_PATH_MIG_STATE |
		    IB_QP_CAP |
		    IB_QP_DEST_QPN,
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}
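
/* Sizing note (not part of the original file, assuming the usual
 * SMC_WR_BUF_CNT of 16): a QP that allows at least that many WRs gets
 * 16 send slots and 16 * 3 == 48 receive slots per link; smaller
 * max_send_wr/max_recv_wr capabilities clamp both counts via min_t().
 */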

static void smc_wr_init_sge(struct smc_link *lnk)
{
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
	}
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		lnk->wr_rx_sges[i].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
		lnk->wr_rx_ibs[i].num_sge = 1;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}

void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	if (!lnk->smcibdev)
		return;
	ibdev = lnk->smcibdev->ibdev;

	smc_wr_wakeup_reg_wait(lnk);
	smc_wr_wakeup_tx_wait(lnk);

	smc_wr_tx_wait_no_pending_sends(lnk);
	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
}

void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_compl);
	lnk->wr_tx_compl = NULL;
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	kfree(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_tx_rdma_sges);
	lnk->wr_tx_rdma_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_tx_rdmas);
	lnk->wr_tx_rdmas = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}

int smc_wr_alloc_link_mem(struct smc_link *link)
{
	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		goto no_mem;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
	link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_rdmas[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_rdmas)
		goto no_mem_wr_rx_ibs;
	link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
					sizeof(link->wr_tx_rdma_sges[0]),
					GFP_KERNEL);
	if (!link->wr_tx_rdma_sges)
		goto no_mem_wr_tx_rdmas;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_tx_rdma_sges;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
				   sizeof(*link->wr_tx_mask),
				   GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_compl[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_compl)
		goto no_mem_wr_tx_pends;
	return 0;
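
	/* The labels below unwind in reverse order of allocation, so each
	 * failure path frees exactly the buffers allocated before the
	 * failing kcalloc() and nothing else.
	 */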
no_mem_wr_tx_pends:
	kfree(link->wr_tx_pends);
no_mem_wr_tx_mask:
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_tx_rdma_sges:
	kfree(link->wr_tx_rdma_sges);
no_mem_wr_tx_rdmas:
	kfree(link->wr_tx_rdmas);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
no_mem:
	return -ENOMEM;
}

void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}

void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_setup(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn);
	tasklet_setup(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn);
}

int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_id = 0;
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
	init_waitqueue_head(&lnk->wr_tx_wait);
	atomic_set(&lnk->wr_tx_refcnt, 0);
	init_waitqueue_head(&lnk->wr_reg_wait);
	atomic_set(&lnk->wr_reg_refcnt, 0);
	return rc;

dma_unmap:
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}