1 /*******************************************************************************
2 * This file contains iSCSI Extensions for RDMA (iSER) Verbs
4 * (c) Copyright 2013 Datera, Inc.
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
19 #include <linux/string.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/socket.h>
24 #include <linux/in6.h>
25 #include <rdma/ib_verbs.h>
26 #include <rdma/rdma_cm.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/iscsi/iscsi_transport.h>
30 #include <linux/semaphore.h>
34 #define ISERT_MAX_CONN 8
35 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
36 #define ISER_MAX_TX_CQ_LEN \
37 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
38 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
41 static int isert_debug_level;
42 module_param_named(debug_level, isert_debug_level, int, 0644);
43 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default: 0)");
45 static DEFINE_MUTEX(device_list_mutex);
46 static LIST_HEAD(device_list);
47 static struct workqueue_struct *isert_comp_wq;
48 static struct workqueue_struct *isert_release_wq;
51 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
53 isert_login_post_recv(struct isert_conn *isert_conn);
55 isert_rdma_accept(struct isert_conn *isert_conn);
56 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
58 static void isert_release_work(struct work_struct *work);
59 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
60 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
61 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
62 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
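/*
 * True when the connection negotiated T10-PI support and the command
 * carries a protection operation, i.e. signature offload is needed.
 */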
65 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
67 return (conn->pi_support &&
68 cmd->prot_op != TARGET_PROT_NORMAL);
73 isert_qp_event_callback(struct ib_event *e, void *context)
75 struct isert_conn *isert_conn = context;
77 isert_err("%s (%d): conn %p\n",
78 ib_event_msg(e->event), e->event, isert_conn);
81 case IB_EVENT_COMM_EST:
82 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
84 case IB_EVENT_QP_LAST_WQE_REACHED:
85 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
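/*
 * Pick the completion context (CQ vector) with the fewest active QPs,
 * under device_list_mutex, so connections are balanced across vectors.
 */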
92 static struct isert_comp *
93 isert_comp_get(struct isert_conn *isert_conn)
95 struct isert_device *device = isert_conn->device;
96 struct isert_comp *comp;
99 mutex_lock(&device_list_mutex);
100 for (i = 0; i < device->comps_used; i++)
101 if (device->comps[i].active_qps <
102 device->comps[min].active_qps)
104 comp = &device->comps[min];
106 mutex_unlock(&device_list_mutex);
108 isert_info("conn %p, using comp %p min_index: %d\n",
109 isert_conn, comp, min);
115 isert_comp_put(struct isert_comp *comp)
117 mutex_lock(&device_list_mutex);
119 mutex_unlock(&device_list_mutex);
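/*
 * Create an RC QP on the chosen completion context. Send and receive
 * work requests share one CQ, and signature offload is requested when
 * the device is PI capable.
 */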
122 static struct ib_qp *
123 isert_create_qp(struct isert_conn *isert_conn,
124 struct isert_comp *comp,
125 struct rdma_cm_id *cma_id)
127 struct isert_device *device = isert_conn->device;
128 struct ib_qp_init_attr attr;
131 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
132 attr.event_handler = isert_qp_event_callback;
133 attr.qp_context = isert_conn;
134 attr.send_cq = comp->cq;
135 attr.recv_cq = comp->cq;
136 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
137 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
138 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
139 attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
140 attr.cap.max_recv_sge = 1;
141 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
142 attr.qp_type = IB_QPT_RC;
143 if (device->pi_capable)
144 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
146 ret = rdma_create_qp(cma_id, device->pd, &attr);
148 isert_err("rdma_create_qp failed for cma_id %d\n", ret);
156 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
158 struct isert_comp *comp;
161 comp = isert_comp_get(isert_conn);
162 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
163 if (IS_ERR(isert_conn->qp)) {
164 ret = PTR_ERR(isert_conn->qp);
170 isert_comp_put(comp);
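/*
 * Allocate the connection's fixed pool of RX descriptors and DMA-map
 * each one for DMA_FROM_DEVICE; unwind all mapped descriptors if any
 * mapping fails.
 */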
175 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
177 struct isert_device *device = isert_conn->device;
178 struct ib_device *ib_dev = device->ib_device;
179 struct iser_rx_desc *rx_desc;
180 struct ib_sge *rx_sg;
184 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
185 sizeof(struct iser_rx_desc), GFP_KERNEL);
186 if (!isert_conn->rx_descs)
189 rx_desc = isert_conn->rx_descs;
191 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
192 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
193 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
194 if (ib_dma_mapping_error(ib_dev, dma_addr))
197 rx_desc->dma_addr = dma_addr;
199 rx_sg = &rx_desc->rx_sg;
200 rx_sg->addr = rx_desc->dma_addr;
201 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
202 rx_sg->lkey = device->pd->local_dma_lkey;
203 rx_desc->rx_cqe.done = isert_recv_done;
209 rx_desc = isert_conn->rx_descs;
210 for (j = 0; j < i; j++, rx_desc++) {
211 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
212 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
214 kfree(isert_conn->rx_descs);
215 isert_conn->rx_descs = NULL;
216 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
221 isert_free_rx_descriptors(struct isert_conn *isert_conn)
223 struct ib_device *ib_dev = isert_conn->device->ib_device;
224 struct iser_rx_desc *rx_desc;
227 if (!isert_conn->rx_descs)
230 rx_desc = isert_conn->rx_descs;
231 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
232 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
233 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
236 kfree(isert_conn->rx_descs);
237 isert_conn->rx_descs = NULL;
241 isert_free_comps(struct isert_device *device)
245 for (i = 0; i < device->comps_used; i++) {
246 struct isert_comp *comp = &device->comps[i];
249 ib_free_cq(comp->cq);
251 kfree(device->comps);
255 isert_alloc_comps(struct isert_device *device)
257 int i, max_cqe, ret = 0;
259 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
260 device->ib_device->num_comp_vectors));
262 isert_info("Using %d CQs, %s supports %d vectors, "
264 device->comps_used, device->ib_device->name,
265 device->ib_device->num_comp_vectors,
268 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
273 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
275 for (i = 0; i < device->comps_used; i++) {
276 struct isert_comp *comp = &device->comps[i];
278 comp->device = device;
279 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
281 if (IS_ERR(comp->cq)) {
282 isert_err("Unable to allocate cq\n");
283 ret = PTR_ERR(comp->cq);
291 isert_free_comps(device);
296 isert_create_device_ib_res(struct isert_device *device)
298 struct ib_device *ib_dev = device->ib_device;
301 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
302 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
304 ret = isert_alloc_comps(device);
308 device->pd = ib_alloc_pd(ib_dev, 0);
309 if (IS_ERR(device->pd)) {
310 ret = PTR_ERR(device->pd);
311 isert_err("failed to allocate pd, device %p, ret=%d\n",
316 /* Check signature cap */
317 device->pi_capable = ib_dev->attrs.device_cap_flags &
318 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
323 isert_free_comps(device);
331 isert_free_device_ib_res(struct isert_device *device)
333 isert_info("device %p\n", device);
335 ib_dealloc_pd(device->pd);
336 isert_free_comps(device);
340 isert_device_put(struct isert_device *device)
342 mutex_lock(&device_list_mutex);
344 isert_info("device %p refcount %d\n", device, device->refcount);
345 if (!device->refcount) {
346 isert_free_device_ib_res(device);
347 list_del(&device->dev_node);
350 mutex_unlock(&device_list_mutex);
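/*
 * Look up a cached isert_device by node_guid and take a reference,
 * or allocate a new one and set up its PD and completion contexts.
 */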
353 static struct isert_device *
354 isert_device_get(struct rdma_cm_id *cma_id)
356 struct isert_device *device;
359 mutex_lock(&device_list_mutex);
360 list_for_each_entry(device, &device_list, dev_node) {
361 if (device->ib_device->node_guid == cma_id->device->node_guid) {
363 isert_info("Found iser device %p refcount %d\n",
364 device, device->refcount);
365 mutex_unlock(&device_list_mutex);
370 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
372 mutex_unlock(&device_list_mutex);
373 return ERR_PTR(-ENOMEM);
376 INIT_LIST_HEAD(&device->dev_node);
378 device->ib_device = cma_id->device;
379 ret = isert_create_device_ib_res(device);
382 mutex_unlock(&device_list_mutex);
387 list_add_tail(&device->dev_node, &device_list);
388 isert_info("Created a new iser device %p refcount %d\n",
389 device, device->refcount);
390 mutex_unlock(&device_list_mutex);
396 isert_init_conn(struct isert_conn *isert_conn)
398 isert_conn->state = ISER_CONN_INIT;
399 INIT_LIST_HEAD(&isert_conn->node);
400 init_completion(&isert_conn->login_comp);
401 init_completion(&isert_conn->login_req_comp);
402 init_waitqueue_head(&isert_conn->rem_wait);
403 kref_init(&isert_conn->kref);
404 mutex_init(&isert_conn->mutex);
405 INIT_WORK(&isert_conn->release_work, isert_release_work);
409 isert_free_login_buf(struct isert_conn *isert_conn)
411 struct ib_device *ib_dev = isert_conn->device->ib_device;
413 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
414 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
415 kfree(isert_conn->login_rsp_buf);
417 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
418 ISER_RX_PAYLOAD_SIZE,
420 kfree(isert_conn->login_req_buf);
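/*
 * Allocate and DMA-map the per-connection login buffers: the request
 * buffer for DMA_FROM_DEVICE and the response buffer for DMA_TO_DEVICE.
 */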
424 isert_alloc_login_buf(struct isert_conn *isert_conn,
425 struct ib_device *ib_dev)
429 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
431 if (!isert_conn->login_req_buf)
434 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
435 isert_conn->login_req_buf,
436 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
437 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
439 isert_err("login_req_dma mapping error: %d\n", ret);
440 isert_conn->login_req_dma = 0;
441 goto out_free_login_req_buf;
444 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
445 if (!isert_conn->login_rsp_buf) {
447 goto out_unmap_login_req_buf;
450 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
451 isert_conn->login_rsp_buf,
452 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
453 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
455 isert_err("login_rsp_dma mapping error: %d\n", ret);
456 isert_conn->login_rsp_dma = 0;
457 goto out_free_login_rsp_buf;
462 out_free_login_rsp_buf:
463 kfree(isert_conn->login_rsp_buf);
464 out_unmap_login_req_buf:
465 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
466 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
467 out_free_login_req_buf:
468 kfree(isert_conn->login_req_buf);
473 isert_set_nego_params(struct isert_conn *isert_conn,
474 struct rdma_conn_param *param)
476 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;
478 /* Set max inflight RDMA READ requests */
479 isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
480 attr->max_qp_init_rd_atom);
481 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
483 if (param->private_data) {
484 u8 flags = *(u8 *)param->private_data;
487 * use remote invalidation if both the initiator
488 * and the HCA support it
490 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
491 (attr->device_cap_flags &
492 IB_DEVICE_MEM_MGT_EXTENSIONS);
493 if (isert_conn->snd_w_inv)
494 isert_info("Using remote invalidation\n");
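/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: reject if the iscsi_np is not
 * accepting connections, otherwise allocate an isert_conn, set up login
 * buffers, device resources and the QP, post the first login recv and
 * accept the RDMA connection.
 */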
499 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
501 struct isert_np *isert_np = cma_id->context;
502 struct iscsi_np *np = isert_np->np;
503 struct isert_conn *isert_conn;
504 struct isert_device *device;
507 spin_lock_bh(&np->np_thread_lock);
509 spin_unlock_bh(&np->np_thread_lock);
510 isert_dbg("iscsi_np is not enabled, reject connect request\n");
511 return rdma_reject(cma_id, NULL, 0);
513 spin_unlock_bh(&np->np_thread_lock);
515 isert_dbg("cma_id: %p, portal: %p\n",
516 cma_id, cma_id->context);
518 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
522 isert_init_conn(isert_conn);
523 isert_conn->cm_id = cma_id;
525 ret = isert_alloc_login_buf(isert_conn, cma_id->device);
529 device = isert_device_get(cma_id);
530 if (IS_ERR(device)) {
531 ret = PTR_ERR(device);
532 goto out_rsp_dma_map;
534 isert_conn->device = device;
536 isert_set_nego_params(isert_conn, &event->param.conn);
538 ret = isert_conn_setup_qp(isert_conn, cma_id);
542 ret = isert_login_post_recv(isert_conn);
546 ret = isert_rdma_accept(isert_conn);
550 mutex_lock(&isert_np->mutex);
551 list_add_tail(&isert_conn->node, &isert_np->accepted);
552 mutex_unlock(&isert_np->mutex);
557 isert_device_put(device);
559 isert_free_login_buf(isert_conn);
562 rdma_reject(cma_id, NULL, 0);
567 isert_connect_release(struct isert_conn *isert_conn)
569 struct isert_device *device = isert_conn->device;
571 isert_dbg("conn %p\n", isert_conn);
575 isert_free_rx_descriptors(isert_conn);
576 if (isert_conn->cm_id &&
577 !isert_conn->dev_removed)
578 rdma_destroy_id(isert_conn->cm_id);
580 if (isert_conn->qp) {
581 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;
583 isert_comp_put(comp);
584 ib_destroy_qp(isert_conn->qp);
587 if (isert_conn->login_req_buf)
588 isert_free_login_buf(isert_conn);
590 isert_device_put(device);
592 if (isert_conn->dev_removed)
593 wake_up_interruptible(&isert_conn->rem_wait);
599 isert_connected_handler(struct rdma_cm_id *cma_id)
601 struct isert_conn *isert_conn = cma_id->qp->qp_context;
602 struct isert_np *isert_np = cma_id->context;
604 isert_info("conn %p\n", isert_conn);
606 mutex_lock(&isert_conn->mutex);
607 isert_conn->state = ISER_CONN_UP;
608 kref_get(&isert_conn->kref);
609 mutex_unlock(&isert_conn->mutex);
611 mutex_lock(&isert_np->mutex);
612 list_move_tail(&isert_conn->node, &isert_np->pending);
613 mutex_unlock(&isert_np->mutex);
615 isert_info("np %p: Allow accept_np to continue\n", isert_np);
620 isert_release_kref(struct kref *kref)
622 struct isert_conn *isert_conn = container_of(kref,
623 struct isert_conn, kref);
625 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
628 isert_connect_release(isert_conn);
632 isert_put_conn(struct isert_conn *isert_conn)
634 kref_put(&isert_conn->kref, isert_release_kref);
638 isert_handle_unbound_conn(struct isert_conn *isert_conn)
640 struct isert_np *isert_np = isert_conn->cm_id->context;
642 mutex_lock(&isert_np->mutex);
643 if (!list_empty(&isert_conn->node)) {
645 * This means the iscsi layer doesn't know about this connection,
646 * so schedule the cleanup ourselves
648 list_del_init(&isert_conn->node);
649 isert_put_conn(isert_conn);
650 queue_work(isert_release_wq, &isert_conn->release_work);
652 mutex_unlock(&isert_np->mutex);
656 * isert_conn_terminate() - Initiate connection termination
657 * @isert_conn: isert connection struct
660 * In case the connection state is BOUND, move state
661 * to TERMINATING and start the teardown sequence (rdma_disconnect).
662 * In case the connection state is UP, complete flush as well.
664 * This routine must be called with the connection mutex held. Thus it is
665 * safe to call multiple times.
668 isert_conn_terminate(struct isert_conn *isert_conn)
672 if (isert_conn->state >= ISER_CONN_TERMINATING)
675 isert_info("Terminating conn %p state %d\n",
676 isert_conn, isert_conn->state);
677 isert_conn->state = ISER_CONN_TERMINATING;
678 err = rdma_disconnect(isert_conn->cm_id);
680 isert_warn("Failed rdma_disconnect isert_conn %p\n",
685 isert_np_cma_handler(struct isert_np *isert_np,
686 enum rdma_cm_event_type event)
688 isert_dbg("%s (%d): isert np %p\n",
689 rdma_event_msg(event), event, isert_np);
692 case RDMA_CM_EVENT_DEVICE_REMOVAL:
693 isert_np->cm_id = NULL;
695 case RDMA_CM_EVENT_ADDR_CHANGE:
696 isert_np->cm_id = isert_setup_id(isert_np);
697 if (IS_ERR(isert_np->cm_id)) {
698 isert_err("isert np %p setup id failed: %ld\n",
699 isert_np, PTR_ERR(isert_np->cm_id));
700 isert_np->cm_id = NULL;
704 isert_err("isert np %p Unexpected event %d\n",
712 isert_disconnected_handler(struct rdma_cm_id *cma_id,
713 enum rdma_cm_event_type event)
715 struct isert_conn *isert_conn = cma_id->qp->qp_context;
717 mutex_lock(&isert_conn->mutex);
718 switch (isert_conn->state) {
719 case ISER_CONN_TERMINATING:
722 isert_conn_terminate(isert_conn);
723 ib_drain_qp(isert_conn->qp);
724 isert_handle_unbound_conn(isert_conn);
726 case ISER_CONN_BOUND:
727 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
728 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
731 isert_warn("conn %p terminating in state %d\n",
732 isert_conn, isert_conn->state);
734 mutex_unlock(&isert_conn->mutex);
740 isert_connect_error(struct rdma_cm_id *cma_id)
742 struct isert_conn *isert_conn = cma_id->qp->qp_context;
743 struct isert_np *isert_np = cma_id->context;
745 ib_drain_qp(isert_conn->qp);
747 mutex_lock(&isert_np->mutex);
748 list_del_init(&isert_conn->node);
749 mutex_unlock(&isert_np->mutex);
750 isert_conn->cm_id = NULL;
751 isert_put_conn(isert_conn);
757 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
759 struct isert_np *isert_np = cma_id->context;
760 struct isert_conn *isert_conn;
763 isert_info("%s (%d): status %d id %p np %p\n",
764 rdma_event_msg(event->event), event->event,
765 event->status, cma_id, cma_id->context);
767 if (isert_np->cm_id == cma_id)
768 return isert_np_cma_handler(cma_id->context, event->event);
770 switch (event->event) {
771 case RDMA_CM_EVENT_CONNECT_REQUEST:
772 ret = isert_connect_request(cma_id, event);
774 isert_err("failed handle connect request %d\n", ret);
776 case RDMA_CM_EVENT_ESTABLISHED:
777 isert_connected_handler(cma_id);
779 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
780 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
781 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
782 ret = isert_disconnected_handler(cma_id, event->event);
784 case RDMA_CM_EVENT_DEVICE_REMOVAL:
785 isert_conn = cma_id->qp->qp_context;
786 isert_conn->dev_removed = true;
787 isert_disconnected_handler(cma_id, event->event);
788 wait_event_interruptible(isert_conn->rem_wait,
789 isert_conn->state == ISER_CONN_DOWN);
792 * return non-zero from the callback to destroy
796 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
797 isert_info("Connection rejected: %s\n",
798 rdma_reject_msg(cma_id, event->status));
799 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
800 case RDMA_CM_EVENT_CONNECT_ERROR:
801 ret = isert_connect_error(cma_id);
804 isert_err("Unhandled RDMA CMA event: %d\n", event->event);
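/*
 * Post 'count' receive work requests as one chained list, each pointing
 * at a pre-mapped RX descriptor, and mark the descriptors as not in use.
 */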
812 isert_post_recvm(struct isert_conn *isert_conn, u32 count)
814 struct ib_recv_wr *rx_wr, *rx_wr_failed;
816 struct iser_rx_desc *rx_desc;
818 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
819 rx_desc = &isert_conn->rx_descs[i];
821 rx_wr->wr_cqe = &rx_desc->rx_cqe;
822 rx_wr->sg_list = &rx_desc->rx_sg;
824 rx_wr->next = rx_wr + 1;
825 rx_desc->in_use = false;
828 rx_wr->next = NULL; /* mark end of work requests list */
830 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
833 isert_err("ib_post_recv() failed with ret: %d\n", ret);
839 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
841 struct ib_recv_wr *rx_wr_failed, rx_wr;
844 if (!rx_desc->in_use) {
846 * if the descriptor is not in-use we already reposted it
847 * for recv, so just silently return
852 rx_desc->in_use = false;
853 rx_wr.wr_cqe = &rx_desc->rx_cqe;
854 rx_wr.sg_list = &rx_desc->rx_sg;
858 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
860 isert_err("ib_post_recv() failed with ret: %d\n", ret);
866 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
868 struct ib_device *ib_dev = isert_conn->cm_id->device;
869 struct ib_send_wr send_wr, *send_wr_failed;
872 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
873 ISER_HEADERS_LEN, DMA_TO_DEVICE);
875 tx_desc->tx_cqe.done = isert_login_send_done;
878 send_wr.wr_cqe = &tx_desc->tx_cqe;
879 send_wr.sg_list = tx_desc->tx_sg;
880 send_wr.num_sge = tx_desc->num_sge;
881 send_wr.opcode = IB_WR_SEND;
882 send_wr.send_flags = IB_SEND_SIGNALED;
884 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
886 isert_err("ib_post_send() failed, ret: %d\n", ret);
892 __isert_create_send_desc(struct isert_device *device,
893 struct iser_tx_desc *tx_desc)
896 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
897 tx_desc->iser_header.flags = ISCSI_CTRL;
899 tx_desc->num_sge = 1;
901 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
902 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
903 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
908 isert_create_send_desc(struct isert_conn *isert_conn,
909 struct isert_cmd *isert_cmd,
910 struct iser_tx_desc *tx_desc)
912 struct isert_device *device = isert_conn->device;
913 struct ib_device *ib_dev = device->ib_device;
915 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
916 ISER_HEADERS_LEN, DMA_TO_DEVICE);
918 __isert_create_send_desc(device, tx_desc);
922 isert_init_tx_hdrs(struct isert_conn *isert_conn,
923 struct iser_tx_desc *tx_desc)
925 struct isert_device *device = isert_conn->device;
926 struct ib_device *ib_dev = device->ib_device;
929 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
930 ISER_HEADERS_LEN, DMA_TO_DEVICE);
931 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
932 isert_err("ib_dma_mapping_error() failed\n");
936 tx_desc->dma_addr = dma_addr;
937 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
938 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
939 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
941 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
942 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
943 tx_desc->tx_sg[0].lkey);
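/*
 * Initialize the SEND work request for a response PDU; use
 * IB_WR_SEND_WITH_INV to remotely invalidate the initiator's rkey when
 * remote invalidation was negotiated and an rkey is available.
 */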
949 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
950 struct ib_send_wr *send_wr)
952 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
954 tx_desc->tx_cqe.done = isert_send_done;
955 send_wr->wr_cqe = &tx_desc->tx_cqe;
957 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
958 send_wr->opcode = IB_WR_SEND_WITH_INV;
959 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
961 send_wr->opcode = IB_WR_SEND;
964 send_wr->sg_list = &tx_desc->tx_sg[0];
965 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
966 send_wr->send_flags = IB_SEND_SIGNALED;
970 isert_login_post_recv(struct isert_conn *isert_conn)
972 struct ib_recv_wr rx_wr, *rx_wr_fail;
976 memset(&sge, 0, sizeof(struct ib_sge));
977 sge.addr = isert_conn->login_req_dma;
978 sge.length = ISER_RX_PAYLOAD_SIZE;
979 sge.lkey = isert_conn->device->pd->local_dma_lkey;
981 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
982 sge.addr, sge.length, sge.lkey);
984 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
986 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
987 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
988 rx_wr.sg_list = &sge;
991 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
993 isert_err("ib_post_recv() failed: %d\n", ret);
999 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1002 struct isert_conn *isert_conn = conn->context;
1003 struct isert_device *device = isert_conn->device;
1004 struct ib_device *ib_dev = device->ib_device;
1005 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
1008 __isert_create_send_desc(device, tx_desc);
1010 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
1011 sizeof(struct iscsi_hdr));
1013 isert_init_tx_hdrs(isert_conn, tx_desc);
1016 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
1018 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
1019 length, DMA_TO_DEVICE);
1021 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
1023 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
1024 length, DMA_TO_DEVICE);
1026 tx_dsg->addr = isert_conn->login_rsp_dma;
1027 tx_dsg->length = length;
1028 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
1029 tx_desc->num_sge = 2;
1031 if (!login->login_failed) {
1032 if (login->login_complete) {
1033 ret = isert_alloc_rx_descriptors(isert_conn);
1037 ret = isert_post_recvm(isert_conn,
1038 ISERT_QP_MAX_RECV_DTOS);
1042 /* Now we are in FULL_FEATURE phase */
1043 mutex_lock(&isert_conn->mutex);
1044 isert_conn->state = ISER_CONN_FULL_FEATURE;
1045 mutex_unlock(&isert_conn->mutex);
1049 ret = isert_login_post_recv(isert_conn);
1054 ret = isert_login_post_send(isert_conn, tx_desc);
1062 isert_rx_login_req(struct isert_conn *isert_conn)
1064 struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
1065 int rx_buflen = isert_conn->login_req_len;
1066 struct iscsi_conn *conn = isert_conn->conn;
1067 struct iscsi_login *login = conn->conn_login;
1070 isert_info("conn %p\n", isert_conn);
1072 WARN_ON_ONCE(!login);
1074 if (login->first_request) {
1075 struct iscsi_login_req *login_req =
1076 (struct iscsi_login_req *)&rx_desc->iscsi_header;
1078 * Setup the initial iscsi_login values from the leading
1079 * login request PDU.
1081 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1082 login->current_stage =
1083 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
1085 login->version_min = login_req->min_version;
1086 login->version_max = login_req->max_version;
1087 memcpy(login->isid, login_req->isid, 6);
1088 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1089 login->init_task_tag = login_req->itt;
1090 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1091 login->cid = be16_to_cpu(login_req->cid);
1092 login->tsih = be16_to_cpu(login_req->tsih);
1095 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1097 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1098 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1099 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1100 MAX_KEY_VALUE_PAIRS);
1101 memcpy(login->req_buf, &rx_desc->data[0], size);
1103 if (login->first_request) {
1104 complete(&isert_conn->login_comp);
1107 schedule_delayed_work(&conn->login_work, 0);
1110 static struct iscsi_cmd
1111 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
1113 struct isert_conn *isert_conn = conn->context;
1114 struct isert_cmd *isert_cmd;
1115 struct iscsi_cmd *cmd;
1117 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1119 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1122 isert_cmd = iscsit_priv_cmd(cmd);
1123 isert_cmd->conn = isert_conn;
1124 isert_cmd->iscsi_cmd = cmd;
1125 isert_cmd->rx_desc = rx_desc;
1131 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1132 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1133 struct iser_rx_desc *rx_desc, unsigned char *buf)
1135 struct iscsi_conn *conn = isert_conn->conn;
1136 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1137 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1138 bool dump_payload = false;
1139 unsigned int data_len;
1141 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1145 imm_data = cmd->immediate_data;
1146 imm_data_len = cmd->first_burst_len;
1147 unsol_data = cmd->unsolicited_data;
1148 data_len = cmd->se_cmd.data_length;
1150 if (imm_data && imm_data_len == data_len)
1151 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1152 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1155 } else if (rc > 0) {
1156 dump_payload = true;
1163 if (imm_data_len != data_len) {
1164 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1165 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
1166 &rx_desc->data[0], imm_data_len);
1167 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
1168 sg_nents, imm_data_len);
1170 sg_init_table(&isert_cmd->sg, 1);
1171 cmd->se_cmd.t_data_sg = &isert_cmd->sg;
1172 cmd->se_cmd.t_data_nents = 1;
1173 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
1174 isert_dbg("Transfer Immediate imm_data_len: %d\n",
1178 cmd->write_data_done += imm_data_len;
1180 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1181 spin_lock_bh(&cmd->istate_lock);
1182 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1183 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1184 spin_unlock_bh(&cmd->istate_lock);
1188 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1190 if (!rc && dump_payload == false && unsol_data)
1191 iscsit_set_unsoliticed_dataout(cmd);
1192 else if (dump_payload && imm_data)
1193 target_put_sess_cmd(&cmd->se_cmd);
1199 isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1200 struct iser_rx_desc *rx_desc, unsigned char *buf)
1202 struct scatterlist *sg_start;
1203 struct iscsi_conn *conn = isert_conn->conn;
1204 struct iscsi_cmd *cmd = NULL;
1205 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1206 u32 unsol_data_len = ntoh24(hdr->dlength);
1207 int rc, sg_nents, sg_off, page_off;
1209 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1215 * FIXME: Unexpected unsolicited_data out
1217 if (!cmd->unsolicited_data) {
1218 isert_err("Received unexpected solicited data payload\n");
1223 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1224 "write_data_done: %u, data_length: %u\n",
1225 unsol_data_len, cmd->write_data_done,
1226 cmd->se_cmd.data_length);
1228 sg_off = cmd->write_data_done / PAGE_SIZE;
1229 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1230 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1231 page_off = cmd->write_data_done % PAGE_SIZE;
1233 * FIXME: Non page-aligned unsolicited_data out
1236 isert_err("unexpected non-page aligned data payload\n");
1240 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1241 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1242 sg_nents, &rx_desc->data[0], unsol_data_len);
1244 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1247 rc = iscsit_check_dataout_payload(cmd, hdr, false);
1252 * multiple data-outs on the same command can arrive,
1253 * so post the receive buffer beforehand
1255 rc = isert_post_recv(isert_conn, rx_desc);
1257 isert_err("ib_post_recv failed with %d\n", rc);
1264 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1265 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1268 struct iscsi_conn *conn = isert_conn->conn;
1269 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1272 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1276 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1279 return iscsit_process_nop_out(conn, cmd, hdr);
1283 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1284 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1285 struct iscsi_text *hdr)
1287 struct iscsi_conn *conn = isert_conn->conn;
1288 u32 payload_length = ntoh24(hdr->dlength);
1290 unsigned char *text_in = NULL;
1292 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1296 if (payload_length) {
1297 text_in = kzalloc(payload_length, GFP_KERNEL);
1301 cmd->text_in_ptr = text_in;
1303 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1305 return iscsit_process_text_cmd(conn, cmd, hdr);
1309 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1310 uint32_t read_stag, uint64_t read_va,
1311 uint32_t write_stag, uint64_t write_va)
1313 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1314 struct iscsi_conn *conn = isert_conn->conn;
1315 struct iscsi_cmd *cmd;
1316 struct isert_cmd *isert_cmd;
1318 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1320 if (conn->sess->sess_ops->SessionType &&
1321 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1322 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1323 " ignoring\n", opcode);
1328 case ISCSI_OP_SCSI_CMD:
1329 cmd = isert_allocate_cmd(conn, rx_desc);
1333 isert_cmd = iscsit_priv_cmd(cmd);
1334 isert_cmd->read_stag = read_stag;
1335 isert_cmd->read_va = read_va;
1336 isert_cmd->write_stag = write_stag;
1337 isert_cmd->write_va = write_va;
1338 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;
1340 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1341 rx_desc, (unsigned char *)hdr);
1343 case ISCSI_OP_NOOP_OUT:
1344 cmd = isert_allocate_cmd(conn, rx_desc);
1348 isert_cmd = iscsit_priv_cmd(cmd);
1349 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1350 rx_desc, (unsigned char *)hdr);
1352 case ISCSI_OP_SCSI_DATA_OUT:
1353 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1354 (unsigned char *)hdr);
1356 case ISCSI_OP_SCSI_TMFUNC:
1357 cmd = isert_allocate_cmd(conn, rx_desc);
1361 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1362 (unsigned char *)hdr);
1364 case ISCSI_OP_LOGOUT:
1365 cmd = isert_allocate_cmd(conn, rx_desc);
1369 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1372 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
1373 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1375 cmd = isert_allocate_cmd(conn, rx_desc);
1380 isert_cmd = iscsit_priv_cmd(cmd);
1381 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1382 rx_desc, (struct iscsi_text *)hdr);
1385 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1394 isert_print_wc(struct ib_wc *wc, const char *type)
1396 if (wc->status != IB_WC_WR_FLUSH_ERR)
1397 isert_err("%s failure: %s (%d) vend_err %x\n", type,
1398 ib_wc_status_msg(wc->status), wc->status,
1401 isert_dbg("%s failure: %s (%d)\n", type,
1402 ib_wc_status_msg(wc->status), wc->status);
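/*
 * Receive completion handler: sync the RX descriptor for CPU access,
 * parse the iSER header for remote stag/va values and dispatch the
 * iSCSI PDU by opcode.
 */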
1406 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1408 struct isert_conn *isert_conn = wc->qp->qp_context;
1409 struct ib_device *ib_dev = isert_conn->cm_id->device;
1410 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
1411 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1412 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
1413 uint64_t read_va = 0, write_va = 0;
1414 uint32_t read_stag = 0, write_stag = 0;
1416 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1417 isert_print_wc(wc, "recv");
1418 if (wc->status != IB_WC_WR_FLUSH_ERR)
1419 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1423 rx_desc->in_use = true;
1425 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1426 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1428 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1429 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
1430 (int)(wc->byte_len - ISER_HEADERS_LEN));
1432 switch (iser_ctrl->flags & 0xF0) {
1434 if (iser_ctrl->flags & ISER_RSV) {
1435 read_stag = be32_to_cpu(iser_ctrl->read_stag);
1436 read_va = be64_to_cpu(iser_ctrl->read_va);
1437 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1438 read_stag, (unsigned long long)read_va);
1440 if (iser_ctrl->flags & ISER_WSV) {
1441 write_stag = be32_to_cpu(iser_ctrl->write_stag);
1442 write_va = be64_to_cpu(iser_ctrl->write_va);
1443 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1444 write_stag, (unsigned long long)write_va);
1447 isert_dbg("ISER ISCSI_CTRL PDU\n");
1450 isert_err("iSER Hello message\n");
1453 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
1457 isert_rx_opcode(isert_conn, rx_desc,
1458 read_stag, read_va, write_stag, write_va);
1460 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
1461 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1465 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1467 struct isert_conn *isert_conn = wc->qp->qp_context;
1468 struct ib_device *ib_dev = isert_conn->device->ib_device;
1470 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1471 isert_print_wc(wc, "login recv");
1475 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
1476 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1478 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
1480 if (isert_conn->conn) {
1481 struct iscsi_login *login = isert_conn->conn->conn_login;
1483 if (login && !login->first_request)
1484 isert_rx_login_req(isert_conn);
1487 mutex_lock(&isert_conn->mutex);
1488 complete(&isert_conn->login_req_comp);
1489 mutex_unlock(&isert_conn->mutex);
1491 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
1492 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1496 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
1498 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
1499 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
1501 if (!cmd->rw.nr_ops)
1504 if (isert_prot_cmd(conn, se_cmd)) {
1505 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
1506 conn->cm_id->port_num, se_cmd->t_data_sg,
1507 se_cmd->t_data_nents, se_cmd->t_prot_sg,
1508 se_cmd->t_prot_nents, dir);
1510 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
1511 se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
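/*
 * Release a command according to its iSCSI opcode: unlink it from the
 * connection list, stop any data-out timer, destroy the rdma_rw context
 * and hand the command back to the target core or iscsit.
 */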
1518 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1520 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1521 struct isert_conn *isert_conn = isert_cmd->conn;
1522 struct iscsi_conn *conn = isert_conn->conn;
1523 struct iscsi_text_rsp *hdr;
1525 isert_dbg("Cmd %p\n", isert_cmd);
1527 switch (cmd->iscsi_opcode) {
1528 case ISCSI_OP_SCSI_CMD:
1529 spin_lock_bh(&conn->cmd_lock);
1530 if (!list_empty(&cmd->i_conn_node))
1531 list_del_init(&cmd->i_conn_node);
1532 spin_unlock_bh(&conn->cmd_lock);
1534 if (cmd->data_direction == DMA_TO_DEVICE) {
1535 iscsit_stop_dataout_timer(cmd);
1537 * Check for special case during comp_err where
1538 * WRITE_PENDING has been handed off from core,
1539 * but requires an extra target_put_sess_cmd()
1540 * before transport_generic_free_cmd() below.
1543 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1544 struct se_cmd *se_cmd = &cmd->se_cmd;
1546 target_put_sess_cmd(se_cmd);
1550 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1551 transport_generic_free_cmd(&cmd->se_cmd, 0);
1553 case ISCSI_OP_SCSI_TMFUNC:
1554 spin_lock_bh(&conn->cmd_lock);
1555 if (!list_empty(&cmd->i_conn_node))
1556 list_del_init(&cmd->i_conn_node);
1557 spin_unlock_bh(&conn->cmd_lock);
1559 transport_generic_free_cmd(&cmd->se_cmd, 0);
1561 case ISCSI_OP_REJECT:
1562 case ISCSI_OP_NOOP_OUT:
1564 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1565 /* If the continue bit is on, keep the command alive */
1566 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1569 spin_lock_bh(&conn->cmd_lock);
1570 if (!list_empty(&cmd->i_conn_node))
1571 list_del_init(&cmd->i_conn_node);
1572 spin_unlock_bh(&conn->cmd_lock);
1575 * Handle special case for REJECT when iscsi_add_reject*() has
1576 * overwritten the original iscsi_opcode assignment, and the
1577 * associated cmd->se_cmd needs to be released.
1579 if (cmd->se_cmd.se_tfo != NULL) {
1580 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1582 transport_generic_free_cmd(&cmd->se_cmd, 0);
1589 iscsit_release_cmd(cmd);
1595 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1597 if (tx_desc->dma_addr != 0) {
1598 isert_dbg("unmap single for tx_desc->dma_addr\n");
1599 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1600 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1601 tx_desc->dma_addr = 0;
1606 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1607 struct ib_device *ib_dev, bool comp_err)
1609 if (isert_cmd->pdu_buf_dma != 0) {
1610 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1611 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1612 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1613 isert_cmd->pdu_buf_dma = 0;
1616 isert_unmap_tx_desc(tx_desc, ib_dev);
1617 isert_put_cmd(isert_cmd, comp_err);
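/*
 * Check the signature MR status after a PI-offloaded transfer and map a
 * guard/ref-tag/app-tag error to the corresponding TCM sense code,
 * recording the failing sector.
 */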
1621 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1623 struct ib_mr_status mr_status;
1626 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1628 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1629 goto fail_mr_status;
1632 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1634 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1636 switch (mr_status.sig_err.err_type) {
1637 case IB_SIG_BAD_GUARD:
1638 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1640 case IB_SIG_BAD_REFTAG:
1641 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1643 case IB_SIG_BAD_APPTAG:
1644 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1647 sec_offset_err = mr_status.sig_err.sig_err_offset;
1648 do_div(sec_offset_err, block_size);
1649 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1651 isert_err("PI error found type %d at sector 0x%llx "
1652 "expected 0x%x vs actual 0x%x\n",
1653 mr_status.sig_err.err_type,
1654 (unsigned long long)se_cmd->bad_sector,
1655 mr_status.sig_err.expected,
1656 mr_status.sig_err.actual);
1665 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1667 struct isert_conn *isert_conn = wc->qp->qp_context;
1668 struct isert_device *device = isert_conn->device;
1669 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1670 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1671 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
1674 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1675 isert_print_wc(wc, "rdma write");
1676 if (wc->status != IB_WC_WR_FLUSH_ERR)
1677 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1678 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1682 isert_dbg("Cmd %p\n", isert_cmd);
1684 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
1685 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1689 * transport_generic_request_failure() expects to hold
1690 * two extra references to handle queue-full, so re-add
1691 * one here as target-core will have already dropped
1692 * it after the first isert_put_datain() callback.
1694 kref_get(&cmd->cmd_kref);
1695 transport_generic_request_failure(cmd, cmd->pi_err);
1698 * XXX: isert_put_response() failure is not retried.
1700 ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
1702 pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
1707 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1709 struct isert_conn *isert_conn = wc->qp->qp_context;
1710 struct isert_device *device = isert_conn->device;
1711 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1712 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1713 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1714 struct se_cmd *se_cmd = &cmd->se_cmd;
1717 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1718 isert_print_wc(wc, "rdma read");
1719 if (wc->status != IB_WC_WR_FLUSH_ERR)
1720 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1721 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1725 isert_dbg("Cmd %p\n", isert_cmd);
1727 iscsit_stop_dataout_timer(cmd);
1729 if (isert_prot_cmd(isert_conn, se_cmd))
1730 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
1731 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1732 cmd->write_data_done = 0;
1734 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1735 spin_lock_bh(&cmd->istate_lock);
1736 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1737 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1738 spin_unlock_bh(&cmd->istate_lock);
1741 * transport_generic_request_failure() will drop the extra
1742 * se_cmd->cmd_kref reference after T10-PI error, and handle
1743 * any non-zero ->queue_status() callback error retries.
1746 transport_generic_request_failure(se_cmd, se_cmd->pi_err);
1748 target_execute_cmd(se_cmd);
1752 isert_do_control_comp(struct work_struct *work)
1754 struct isert_cmd *isert_cmd = container_of(work,
1755 struct isert_cmd, comp_work);
1756 struct isert_conn *isert_conn = isert_cmd->conn;
1757 struct ib_device *ib_dev = isert_conn->cm_id->device;
1758 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1760 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
1762 switch (cmd->i_state) {
1763 case ISTATE_SEND_TASKMGTRSP:
1764 iscsit_tmr_post_handler(cmd, cmd->conn);
1765 case ISTATE_SEND_REJECT: /* FALLTHRU */
1766 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
1767 cmd->i_state = ISTATE_SENT_STATUS;
1768 isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
1771 case ISTATE_SEND_LOGOUTRSP:
1772 iscsit_logout_post_handler(cmd, cmd->conn);
1775 isert_err("Unknown i_state %d\n", cmd->i_state);
1782 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
1784 struct isert_conn *isert_conn = wc->qp->qp_context;
1785 struct ib_device *ib_dev = isert_conn->cm_id->device;
1786 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1788 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1789 isert_print_wc(wc, "login send");
1790 if (wc->status != IB_WC_WR_FLUSH_ERR)
1791 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1794 isert_unmap_tx_desc(tx_desc, ib_dev);
1798 isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
1800 struct isert_conn *isert_conn = wc->qp->qp_context;
1801 struct ib_device *ib_dev = isert_conn->cm_id->device;
1802 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1803 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
1805 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1806 isert_print_wc(wc, "send");
1807 if (wc->status != IB_WC_WR_FLUSH_ERR)
1808 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1809 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
1813 isert_dbg("Cmd %p\n", isert_cmd);
1815 switch (isert_cmd->iscsi_cmd->i_state) {
1816 case ISTATE_SEND_TASKMGTRSP:
1817 case ISTATE_SEND_LOGOUTRSP:
1818 case ISTATE_SEND_REJECT:
1819 case ISTATE_SEND_TEXTRSP:
1820 isert_unmap_tx_desc(tx_desc, ib_dev);
1822 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1823 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1826 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
1827 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1833 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1835 struct ib_send_wr *wr_failed;
1838 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
1840 isert_err("ib_post_recv failed with %d\n", ret);
1844 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
1847 isert_err("ib_post_send failed with %d\n", ret);
1854 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1856 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1857 struct isert_conn *isert_conn = conn->context;
1858 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1859 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1860 &isert_cmd->tx_desc.iscsi_header;
1862 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1863 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1864 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1866 * Attach SENSE DATA payload to iSCSI Response PDU
1868 if (cmd->se_cmd.sense_buffer &&
1869 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1870 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1871 struct isert_device *device = isert_conn->device;
1872 struct ib_device *ib_dev = device->ib_device;
1873 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1874 u32 padding, pdu_len;
1876 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1878 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1880 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1881 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1882 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1884 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1885 (void *)cmd->sense_buffer, pdu_len,
1887 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
1890 isert_cmd->pdu_buf_len = pdu_len;
1891 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1892 tx_dsg->length = pdu_len;
1893 tx_dsg->lkey = device->pd->local_dma_lkey;
1894 isert_cmd->tx_desc.num_sge = 2;
1897 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1899 isert_dbg("Posting SCSI Response\n");
1901 return isert_post_response(isert_conn, isert_cmd);
1905 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1907 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1908 struct isert_conn *isert_conn = conn->context;
1910 spin_lock_bh(&conn->cmd_lock);
1911 if (!list_empty(&cmd->i_conn_node))
1912 list_del_init(&cmd->i_conn_node);
1913 spin_unlock_bh(&conn->cmd_lock);
1915 if (cmd->data_direction == DMA_TO_DEVICE)
1916 iscsit_stop_dataout_timer(cmd);
1917 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1920 static enum target_prot_op
1921 isert_get_sup_prot_ops(struct iscsi_conn *conn)
1923 struct isert_conn *isert_conn = conn->context;
1924 struct isert_device *device = isert_conn->device;
1926 if (conn->tpg->tpg_attrib.t10_pi) {
1927 if (device->pi_capable) {
1928 isert_info("conn %p PI offload enabled\n", isert_conn);
1929 isert_conn->pi_support = true;
1930 return TARGET_PROT_ALL;
1934 isert_info("conn %p PI offload disabled\n", isert_conn);
1935 isert_conn->pi_support = false;
1937 return TARGET_PROT_NORMAL;
1941 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1942 bool nopout_response)
1944 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1945 struct isert_conn *isert_conn = conn->context;
1946 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1948 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1949 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1950 &isert_cmd->tx_desc.iscsi_header,
1952 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1953 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1955 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
1957 return isert_post_response(isert_conn, isert_cmd);
1961 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1963 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1964 struct isert_conn *isert_conn = conn->context;
1965 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1967 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1968 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1969 &isert_cmd->tx_desc.iscsi_header);
1970 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1971 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1973 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
1975 return isert_post_response(isert_conn, isert_cmd);
1979 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1981 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1982 struct isert_conn *isert_conn = conn->context;
1983 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1985 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1986 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1987 &isert_cmd->tx_desc.iscsi_header);
1988 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1989 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1991 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
1993 return isert_post_response(isert_conn, isert_cmd);
1997 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1999 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2000 struct isert_conn *isert_conn = conn->context;
2001 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2002 struct isert_device *device = isert_conn->device;
2003 struct ib_device *ib_dev = device->ib_device;
2004 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2005 struct iscsi_reject *hdr =
2006 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
2008 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2009 iscsit_build_reject(cmd, conn, hdr);
2010 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2012 hton24(hdr->dlength, ISCSI_HDR_LEN);
2013 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2014 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2016 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
2018 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2019 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2020 tx_dsg->length = ISCSI_HDR_LEN;
2021 tx_dsg->lkey = device->pd->local_dma_lkey;
2022 isert_cmd->tx_desc.num_sge = 2;
2024 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2026 isert_dbg("conn %p Posting Reject\n", isert_conn);
2028 return isert_post_response(isert_conn, isert_cmd);
2032 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2034 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2035 struct isert_conn *isert_conn = conn->context;
2036 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2037 struct iscsi_text_rsp *hdr =
2038 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2042 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2043 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
2048 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2051 struct isert_device *device = isert_conn->device;
2052 struct ib_device *ib_dev = device->ib_device;
2053 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2054 void *txt_rsp_buf = cmd->buf_ptr;
2056 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2057 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2058 if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
2061 isert_cmd->pdu_buf_len = txt_rsp_len;
2062 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2063 tx_dsg->length = txt_rsp_len;
2064 tx_dsg->lkey = device->pd->local_dma_lkey;
2065 isert_cmd->tx_desc.num_sge = 2;
2067 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2069 isert_dbg("conn %p Text Response\n", isert_conn);
2071 return isert_post_response(isert_conn, isert_cmd);
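/*
 * Describe one T10-DIF signature domain (wire or memory side) used by
 * signature offload: CRC guard, per-block PI interval and the ref tag
 * seed, with ref tag remapping for Type1/Type2 protection.
 */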
2075 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2076 struct ib_sig_domain *domain)
2078 domain->sig_type = IB_SIG_TYPE_T10_DIF;
2079 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2080 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2081 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
2083 * At the moment we hard-code these values, but if in the future
2084 * the target core wants to control them, we will take them
2087 domain->sig.dif.apptag_check_mask = 0xffff;
2088 domain->sig.dif.app_escape = true;
2089 domain->sig.dif.ref_escape = true;
2090 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2091 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2092 domain->sig.dif.ref_remap = true;
2096 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2098 memset(sig_attrs, 0, sizeof(*sig_attrs));
2100 switch (se_cmd->prot_op) {
2101 case TARGET_PROT_DIN_INSERT:
2102 case TARGET_PROT_DOUT_STRIP:
2103 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2104 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2106 case TARGET_PROT_DOUT_INSERT:
2107 case TARGET_PROT_DIN_STRIP:
2108 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2109 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2111 case TARGET_PROT_DIN_PASS:
2112 case TARGET_PROT_DOUT_PASS:
2113 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2114 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2117 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2121 sig_attrs->check_mask =
2122 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2123 (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
2124 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
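/*
 * Initialize (once per command) and post the rdma_rw context toward the
 * remote stag/va from the iSER header: a signature context when PI
 * offload is used, otherwise a plain R/W context.
 */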
2129 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
2130 struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
2132 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
2133 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
2134 u8 port_num = conn->cm_id->port_num;
2139 if (cmd->ctx_init_done)
2142 if (dir == DMA_FROM_DEVICE) {
2143 addr = cmd->write_va;
2144 rkey = cmd->write_stag;
2145 offset = cmd->iscsi_cmd->write_data_done;
2147 addr = cmd->read_va;
2148 rkey = cmd->read_stag;
2152 if (isert_prot_cmd(conn, se_cmd)) {
2153 struct ib_sig_attrs sig_attrs;
2155 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2159 WARN_ON_ONCE(offset);
2160 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
2161 se_cmd->t_data_sg, se_cmd->t_data_nents,
2162 se_cmd->t_prot_sg, se_cmd->t_prot_nents,
2163 &sig_attrs, addr, rkey, dir);
2165 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
2166 se_cmd->t_data_sg, se_cmd->t_data_nents,
2167 offset, addr, rkey, dir);
2171 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
2175 cmd->ctx_init_done = true;
2178 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
2180 isert_err("Cmd: %p failed to post RDMA res\n", cmd);
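/*
 * Data-in path: post RDMA_WRITEs for the read payload; when PI offload
 * is not used, the SCSI response SEND is chained to the same work
 * request list so one completion finishes the command.
 */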
2185 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2187 struct se_cmd *se_cmd = &cmd->se_cmd;
2188 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2189 struct isert_conn *isert_conn = conn->context;
2190 struct ib_cqe *cqe = NULL;
2191 struct ib_send_wr *chain_wr = NULL;
2194 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2195 isert_cmd, se_cmd->data_length);
2197 if (isert_prot_cmd(isert_conn, se_cmd)) {
2198 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
2199 cqe = &isert_cmd->tx_desc.tx_cqe;
2202 * Build isert_cmd->tx_desc for the iSCSI response PDU and chain it behind the RDMA WRITE.
2204 isert_create_send_desc(isert_conn, isert_cmd,
2205 &isert_cmd->tx_desc);
2206 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2207 &isert_cmd->tx_desc.iscsi_header);
2208 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2209 isert_init_send_wr(isert_conn, isert_cmd,
2210 &isert_cmd->tx_desc.send_wr);
2212 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2214 isert_err("ib_post_recv failed with %d\n", rc);
2218 chain_wr = &isert_cmd->tx_desc.send_wr;
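/*
 * Without PI the SCSI response PDU built above is chained behind the
 * RDMA WRITE, so a single rdma_rw_ctx_post() sends the Data-In
 * followed by the response. With PI the response is instead sent from
 * isert_rdma_write_done() once the signature status has been checked.
 */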
2221 rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
2222 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
2228 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2230 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2233 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2234 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
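/*
 * Data-Out PDUs are not solicited over the wire here; the WRITE
 * payload is pulled with RDMA READ straight from the initiator buffer
 * advertised via write_va/write_stag, and isert_rdma_read_done()
 * finishes the command once the transfer completes.
 */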
2236 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
2237 ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
2238 &isert_cmd->tx_desc.tx_cqe, NULL);
2240 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
2246 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2248 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2253 spin_lock_bh(&conn->cmd_lock);
2254 list_del_init(&cmd->i_conn_node);
2255 spin_unlock_bh(&conn->cmd_lock);
2256 isert_put_cmd(isert_cmd, true);
2258 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2259 ret = isert_put_nopin(cmd, conn, false);
2262 isert_err("Unknown immediate state: 0x%02x\n", state);
2271 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2273 struct isert_conn *isert_conn = conn->context;
2277 case ISTATE_SEND_LOGOUTRSP:
2278 ret = isert_put_logout_rsp(cmd, conn);
2280 isert_conn->logout_posted = true;
2282 case ISTATE_SEND_NOPIN:
2283 ret = isert_put_nopin(cmd, conn, true);
2285 case ISTATE_SEND_TASKMGTRSP:
2286 ret = isert_put_tm_rsp(cmd, conn);
2288 case ISTATE_SEND_REJECT:
2289 ret = isert_put_reject(cmd, conn);
2291 case ISTATE_SEND_TEXTRSP:
2292 ret = isert_put_text_rsp(cmd, conn);
2294 case ISTATE_SEND_STATUS:
2296 * Special case for sending a non-GOOD SCSI status from the TX thread
2297 * context when a failure occurs before se_cmd execution.
2299 ret = isert_put_response(conn, cmd);
2302 isert_err("Unknown response state: 0x%02x\n", state);
2311 isert_setup_id(struct isert_np *isert_np)
2313 struct iscsi_np *np = isert_np->np;
2314 struct rdma_cm_id *id;
2315 struct sockaddr *sa;
2318 sa = (struct sockaddr *)&np->np_sockaddr;
2319 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
2321 id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
2322 RDMA_PS_TCP, IB_QPT_RC);
2324 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
2328 isert_dbg("id %p context %p\n", id, id->context);
2330 ret = rdma_bind_addr(id, sa);
2332 isert_err("rdma_bind_addr() failed: %d\n", ret);
2336 ret = rdma_listen(id, 0);
2338 isert_err("rdma_listen() failed: %d\n", ret);
2344 rdma_destroy_id(id);
2346 return ERR_PTR(ret);
2350 isert_setup_np(struct iscsi_np *np,
2351 struct sockaddr_storage *ksockaddr)
2353 struct isert_np *isert_np;
2354 struct rdma_cm_id *isert_lid;
2357 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
2361 sema_init(&isert_np->sem, 0);
2362 mutex_init(&isert_np->mutex);
2363 INIT_LIST_HEAD(&isert_np->accepted);
2364 INIT_LIST_HEAD(&isert_np->pending);
2368 * Set up np->np_sockaddr from the sockaddr passed in from the
2369 * iscsi_target_configfs.c code.
2371 memcpy(&np->np_sockaddr, ksockaddr,
2372 sizeof(struct sockaddr_storage));
2374 isert_lid = isert_setup_id(isert_np);
2375 if (IS_ERR(isert_lid)) {
2376 ret = PTR_ERR(isert_lid);
2380 isert_np->cm_id = isert_lid;
2381 np->np_context = isert_np;
2392 isert_rdma_accept(struct isert_conn *isert_conn)
2394 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2395 struct rdma_conn_param cp;
2397 struct iser_cm_hdr rsp_hdr;
2399 memset(&cp, 0, sizeof(struct rdma_conn_param));
2400 cp.initiator_depth = isert_conn->initiator_depth;
2402 cp.rnr_retry_count = 7;
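/*
 * An RNR retry count of 7 is special-cased by the IB spec to mean
 * "retry indefinitely" when the peer responds with RNR NAKs.
 */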
2404 memset(&rsp_hdr, 0, sizeof(rsp_hdr));
2405 rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
2406 if (!isert_conn->snd_w_inv)
2407 rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
2408 cp.private_data = (void *)&rsp_hdr;
2409 cp.private_data_len = sizeof(rsp_hdr);
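/*
 * The iser_cm_hdr carried in the CM accept private data tells the
 * initiator that zero-based virtual addressing is not used and, unless
 * the initiator offered it during connect, that Send-with-Invalidate
 * will not be used either.
 */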
2411 ret = rdma_accept(cm_id, &cp);
2413 isert_err("rdma_accept() failed with: %d\n", ret);
2421 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2423 struct isert_conn *isert_conn = conn->context;
2426 isert_info("before login_req comp conn: %p\n", isert_conn);
2427 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
2429 isert_err("isert_conn %p interrupted before login request arrived\n",
2433 reinit_completion(&isert_conn->login_req_comp);
2436 * For login requests after the first PDU, isert_rx_login_req() will
2437 * kick schedule_delayed_work(&conn->login_work) as the packet is
2438 * received, which turns this callback from iscsi_target_do_login_rx() into a NOP.
2441 if (!login->first_request)
2444 isert_rx_login_req(isert_conn);
2446 isert_info("before login_comp conn: %p\n", conn);
2447 ret = wait_for_completion_interruptible(&isert_conn->login_comp);
2451 isert_info("processing login->req: %p\n", login->req);
2457 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2458 struct isert_conn *isert_conn)
2460 struct rdma_cm_id *cm_id = isert_conn->cm_id;
2461 struct rdma_route *cm_route = &cm_id->route;
2463 conn->login_family = np->np_sockaddr.ss_family;
2465 conn->login_sockaddr = cm_route->addr.dst_addr;
2466 conn->local_sockaddr = cm_route->addr.src_addr;
2470 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2472 struct isert_np *isert_np = np->np_context;
2473 struct isert_conn *isert_conn;
2477 ret = down_interruptible(&isert_np->sem);
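/*
 * The semaphore is raised by the CM handler only after the RDMA
 * connection has reached the established state, so once we get past
 * this point there is normally an isert_conn waiting on the pending
 * list for us to bind to the new iscsi_conn.
 */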
2481 spin_lock_bh(&np->np_thread_lock);
2482 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
2483 spin_unlock_bh(&np->np_thread_lock);
2484 isert_dbg("np_thread_state %d\n",
2485 np->np_thread_state);
2487 * No point in stalling here when np_thread
2488 * is in state RESET/SHUTDOWN/EXIT - bail
2492 spin_unlock_bh(&np->np_thread_lock);
2494 mutex_lock(&isert_np->mutex);
2495 if (list_empty(&isert_np->pending)) {
2496 mutex_unlock(&isert_np->mutex);
2499 isert_conn = list_first_entry(&isert_np->pending,
2500 struct isert_conn, node);
2501 list_del_init(&isert_conn->node);
2502 mutex_unlock(&isert_np->mutex);
2504 conn->context = isert_conn;
2505 isert_conn->conn = conn;
2506 isert_conn->state = ISER_CONN_BOUND;
2508 isert_set_conn_info(np, conn, isert_conn);
2510 isert_dbg("Processing isert_conn: %p\n", isert_conn);
2516 isert_free_np(struct iscsi_np *np)
2518 struct isert_np *isert_np = np->np_context;
2519 struct isert_conn *isert_conn, *n;
2520 LIST_HEAD(drop_conn_list);
2522 if (isert_np->cm_id)
2523 rdma_destroy_id(isert_np->cm_id);
2526 * FIXME: At this point we don't have a good way to ensure that
2527 * there are no hanging connections that completed the RDMA
2528 * establishment but never started the iscsi login process. Work
2529 * around this by cleaning up whatever piled up in the accepted
2530 * and pending lists.
2532 mutex_lock(&isert_np->mutex);
2533 if (!list_empty(&isert_np->pending)) {
2534 isert_info("Still have isert pending connections\n");
2535 list_for_each_entry_safe(isert_conn, n,
2538 isert_info("cleaning isert_conn %p state (%d)\n",
2539 isert_conn, isert_conn->state);
2540 list_move_tail(&isert_conn->node, &drop_conn_list);
2544 if (!list_empty(&isert_np->accepted)) {
2545 isert_info("Still have isert accepted connections\n");
2546 list_for_each_entry_safe(isert_conn, n,
2547 &isert_np->accepted,
2549 isert_info("cleaning isert_conn %p state (%d)\n",
2550 isert_conn, isert_conn->state);
2551 list_move_tail(&isert_conn->node, &drop_conn_list);
2554 mutex_unlock(&isert_np->mutex);
2556 list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
2557 list_del_init(&isert_conn->node);
2558 isert_connect_release(isert_conn);
2561 np->np_context = NULL;
2565 static void isert_release_work(struct work_struct *work)
2567 struct isert_conn *isert_conn = container_of(work,
2571 isert_info("Starting release conn %p\n", isert_conn);
2573 mutex_lock(&isert_conn->mutex);
2574 isert_conn->state = ISER_CONN_DOWN;
2575 mutex_unlock(&isert_conn->mutex);
2577 isert_info("Destroying conn %p\n", isert_conn);
2578 isert_put_conn(isert_conn);
2582 isert_wait4logout(struct isert_conn *isert_conn)
2584 struct iscsi_conn *conn = isert_conn->conn;
2586 isert_info("conn %p\n", isert_conn);
2588 if (isert_conn->logout_posted) {
2589 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
2590 wait_for_completion_timeout(&conn->conn_logout_comp,
2591 SECONDS_FOR_LOGOUT_COMP * HZ);
2596 isert_wait4cmds(struct iscsi_conn *conn)
2598 isert_info("iscsi_conn %p\n", conn);
2601 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2602 target_wait_for_sess_cmds(conn->sess->se_sess);
2607 * isert_put_unsol_pending_cmds() - Drop commands waiting for
2608 * unsolicited data-out
2609 * @conn: iscsi connection
2611 * We might still have commands that are waiting for unsolicited
2612 * data-out messages. We must put the extra reference on those
2613 * before blocking in target_wait_for_sess_cmds().
2616 isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
2618 struct iscsi_cmd *cmd, *tmp;
2619 static LIST_HEAD(drop_cmd_list);
2621 spin_lock_bh(&conn->cmd_lock);
2622 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
2623 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
2624 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
2625 (cmd->write_data_done < cmd->se_cmd.data_length))
2626 list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
2628 spin_unlock_bh(&conn->cmd_lock);
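/*
 * Commands moved to drop_cmd_list were still expecting unsolicited
 * data-out that will never arrive now; drop their extra reference so
 * that target_wait_for_sess_cmds() in isert_wait4cmds() does not wait
 * on them forever.
 */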
2630 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
2631 list_del_init(&cmd->i_conn_node);
2632 if (cmd->i_state != ISTATE_REMOVE) {
2633 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2635 isert_info("conn %p dropping cmd %p\n", conn, cmd);
2636 isert_put_cmd(isert_cmd, true);
2641 static void isert_wait_conn(struct iscsi_conn *conn)
2643 struct isert_conn *isert_conn = conn->context;
2645 isert_info("Starting conn %p\n", isert_conn);
2647 mutex_lock(&isert_conn->mutex);
2648 isert_conn_terminate(isert_conn);
2649 mutex_unlock(&isert_conn->mutex);
2651 ib_drain_qp(isert_conn->qp);
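/*
 * Draining the QP flushes every outstanding receive and send work
 * request, so no further completions can race with the command
 * cleanup below.
 */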
2652 isert_put_unsol_pending_cmds(conn);
2653 isert_wait4cmds(conn);
2654 isert_wait4logout(isert_conn);
2656 queue_work(isert_release_wq, &isert_conn->release_work);
2659 static void isert_free_conn(struct iscsi_conn *conn)
2661 struct isert_conn *isert_conn = conn->context;
2663 ib_drain_qp(isert_conn->qp);
2664 isert_put_conn(isert_conn);
2667 static void isert_get_rx_pdu(struct iscsi_conn *conn)
2669 struct completion comp;
2671 init_completion(&comp);
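/*
 * iSER delivers received PDUs from the CQ completion handlers
 * (isert_recv_done()), so the iSCSI RX thread has nothing to read;
 * park it on a completion that is only interrupted when the thread is
 * signalled to stop at connection teardown.
 */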
2673 wait_for_completion_interruptible(&comp);
2676 static struct iscsit_transport iser_target_transport = {
2678 .transport_type = ISCSI_INFINIBAND,
2679 .rdma_shutdown = true,
2680 .priv_size = sizeof(struct isert_cmd),
2681 .owner = THIS_MODULE,
2682 .iscsit_setup_np = isert_setup_np,
2683 .iscsit_accept_np = isert_accept_np,
2684 .iscsit_free_np = isert_free_np,
2685 .iscsit_wait_conn = isert_wait_conn,
2686 .iscsit_free_conn = isert_free_conn,
2687 .iscsit_get_login_rx = isert_get_login_rx,
2688 .iscsit_put_login_tx = isert_put_login_tx,
2689 .iscsit_immediate_queue = isert_immediate_queue,
2690 .iscsit_response_queue = isert_response_queue,
2691 .iscsit_get_dataout = isert_get_dataout,
2692 .iscsit_queue_data_in = isert_put_datain,
2693 .iscsit_queue_status = isert_put_response,
2694 .iscsit_aborted_task = isert_aborted_task,
2695 .iscsit_get_rx_pdu = isert_get_rx_pdu,
2696 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
2699 static int __init isert_init(void)
2703 isert_comp_wq = alloc_workqueue("isert_comp_wq",
2704 WQ_UNBOUND | WQ_HIGHPRI, 0);
2705 if (!isert_comp_wq) {
2706 isert_err("Unable to allocate isert_comp_wq\n");
2710 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
2711 WQ_UNBOUND_MAX_ACTIVE);
2712 if (!isert_release_wq) {
2713 isert_err("Unable to allocate isert_release_wq\n");
2715 goto destroy_comp_wq;
2718 iscsit_register_transport(&iser_target_transport);
2719 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
2724 destroy_workqueue(isert_comp_wq);
2729 static void __exit isert_exit(void)
2731 flush_scheduled_work();
2732 destroy_workqueue(isert_release_wq);
2733 destroy_workqueue(isert_comp_wq);
2734 iscsit_unregister_transport(&iser_target_transport);
2735 isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
2738 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
2739 MODULE_AUTHOR("nab@Linux-iSCSI.org");
2740 MODULE_LICENSE("GPL");
2742 module_init(isert_init);
2743 module_exit(isert_exit);