/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"
/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE
struct nvmet_rdma_cmd {
	struct ib_sge		sge[2];
	struct ib_cqe		cqe;
	struct ib_recv_wr	wr;
	struct scatterlist	inline_sg;
	struct page		*inline_page;
	struct nvme_command	*nvme_cmd;
	struct nvmet_rdma_queue	*queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

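/*
 * Per-response context: the RDMA SEND carrying the NVMe completion,
 * the optional RDMA READ context for host-to-target data, and the list
 * linkage used by the per-queue free list and the wait lists.
 */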
struct nvmet_rdma_rsp {
	struct ib_sge		send_sge;
	struct ib_cqe		send_cqe;
	struct ib_send_wr	send_wr;

	struct nvmet_rdma_cmd	*cmd;
	struct nvmet_rdma_queue	*queue;

	struct ib_cqe		read_cqe;
	struct rdma_rw_ctx	rw;

	struct nvmet_req	req;

	bool			allocated;
	u8			n_rdma;
	u32			flags;
	u32			invalidate_rkey;

	struct list_head	wait_list;
	struct list_head	free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id	*cm_id;
	struct nvmet_port	*port;
	struct ib_cq		*cq;
	atomic_t		sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t		state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	struct nvmet_rdma_rsp	*rsps;
	struct list_head	free_rsps;
	spinlock_t		rsps_lock;
	struct nvmet_rdma_cmd	*cmds;

	struct work_struct	release_work;
	struct list_head	rsp_wait_list;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;

	int			idx;
	int			host_qid;
	int			recv_queue_size;
	int			send_queue_size;

	struct list_head	queue_list;
};

struct nvmet_rdma_device {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_srq		*srq;
	struct nvmet_rdma_cmd	*srq_cmds;
	size_t			srq_size;
	struct kref		ref;
	struct list_head	entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

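/*
 * Responses are normally taken from a pre-allocated per-queue pool
 * (sized at recv_queue_size * 2 in nvmet_rdma_alloc_rsps()). Under
 * memory pressure the pool can run empty, in which case we fall back
 * to a dynamic allocation marked by rsp->allocated so that
 * nvmet_rdma_put_rsp() knows to free it instead of recycling it.
 */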
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	int count;

	if (!sgl || !nents)
		return;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));
	kfree(sgl);
}

static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
out:
	return NVME_SC_INTERNAL;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
}

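/*
 * Re-run commands that were parked because the send queue could not
 * accommodate their work requests; stop at the first command that
 * still does not fit (nvmet_rdma_execute_command() returns false).
 */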
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * we didn't setup the controller yet in case
		 * of admin connect error, just disconnect and
		 * cleanup the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

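/*
 * Completion path: when the command carries data back to the host we
 * chain the RDMA WRITE work requests in front of the SEND, and we use
 * SEND_WITH_INV when the host asked for its rkey to be invalidated.
 */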
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	rsp->req.execute(&rsp->req);
}

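/*
 * Inline data arrived together with the command in the second RECV sge,
 * so it only needs to be wrapped in a single-entry scatterlist; no RDMA
 * READ is required for it.
 */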
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;
	u16 status;

	/* no data command? */
	if (!len)
		return 0;

	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
			len);
	if (status)
		return status;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

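/*
 * Each command consumes one SEND plus n_rdma READ/WRITE work requests.
 * Reserve them all up front and back off (returning false so the caller
 * can park the command on rsp_wr_wait_list) if the send queue is full.
 */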
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

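/*
 * SRQ mode (use_srq=Y) shares a single receive queue and one set of
 * command buffers across all queues on a device; if the device cannot
 * create an SRQ we silently fall back to per-queue receive queues.
 */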
static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

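/*
 * Devices are shared by all queues and ports on the same HCA, looked up
 * by node GUID under device_list_mutex; the last queue to drop its
 * reference tears the device down via nvmet_rdma_free_dev().
 */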
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_info("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);

	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
		enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param  param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}
	queue->port = cm_id->context;

	if (queue->host_qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret)
		goto release_queue;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

release_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
		/* fall through */
	case NVMET_RDMA_IN_DEVICE_REMOVAL:
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
}

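/*
 * Removing the queue from nvmet_rdma_queue_list doubles as an ownership
 * marker: only the path that takes the entry off the list performs the
 * disconnect, so concurrent teardown paths cannot schedule the release
 * work twice.
 */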
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
}

/**
 * nvme_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device bound listener cm_id (where in this
 * case queue will be null).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction and destroying
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. we have registered
		 * an ib_client to handle queues removal
		 * so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->priv, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		/*
		 * We might end up here when we already freed the qp
		 * which means queue release sequence is in progress,
		 * so don't get in the way...
		 */
		if (queue)
			nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

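/*
 * Port setup: translate the configured discovery address into a
 * sockaddr, then bind and listen on a CM ID. The CM ID is stashed in
 * port->priv so that remove_port (or device removal) can destroy it
 * exactly once via xchg().
 */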
static int nvmet_rdma_add_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_storage addr = { };
	__kernel_sa_family_t af;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				port->disc_addr.adrfam);
		return -EINVAL;
	}

	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
			port->disc_addr.trsvcid, &addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			port->disc_addr.traddr, port->disc_addr.trsvcid);
		return ret;
	}

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret) {
		pr_err("rdma_set_afonly failed (%d)\n", ret);
		goto out_destroy_id;
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
	if (ret) {
		pr_err("binding CM ID to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpcs)\n",
		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
}

static struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvmet_rdma_queue *queue, *tmp;

	/* Device is being removed, delete all queues using this device */
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->dev->device != ib_device)
			continue;

		pr_info("Removing queue %d\n", queue->idx);
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
}

static struct ib_client nvmet_rdma_ib_client = {
	.name	= "nvmet_rdma",
	.remove = nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	struct nvmet_rdma_queue *queue;

	nvmet_unregister_transport(&nvmet_rdma_ops);

	flush_scheduled_work();

	mutex_lock(&nvmet_rdma_queue_mutex);
	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
			struct nvmet_rdma_queue, queue_list))) {
		list_del_init(&queue->queue_list);

		mutex_unlock(&nvmet_rdma_queue_mutex);
		__nvmet_rdma_queue_disconnect(queue);
		mutex_lock(&nvmet_rdma_queue_mutex);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
	ib_unregister_client(&nvmet_rdma_ib_client);
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */