1 /*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
4 * (c) Copyright 2013 Datera, Inc.
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
19 #include <linux/string.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/socket.h>
24 #include <linux/in6.h>
25 #include <rdma/ib_verbs.h>
26 #include <rdma/rdma_cm.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/iscsi/iscsi_transport.h>
30 #include <linux/semaphore.h>
34 #define ISERT_MAX_CONN 8
35 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
36 #define ISER_MAX_TX_CQ_LEN \
37 ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
38 #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
41 static int isert_debug_level;
42 module_param_named(debug_level, isert_debug_level, int, 0644);
43 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
45 static DEFINE_MUTEX(device_list_mutex);
46 static LIST_HEAD(device_list);
47 static struct workqueue_struct *isert_comp_wq;
48 static struct workqueue_struct *isert_release_wq;
51 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
53 isert_login_post_recv(struct isert_conn *isert_conn);
55 isert_rdma_accept(struct isert_conn *isert_conn);
56 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
58 static void isert_release_work(struct work_struct *work);
59 static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
60 static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
61 static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
62 static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
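/*
 * True when the connection negotiated T10-PI offload and the command
 * actually carries a protection operation; used below to select the
 * signature-enabled RDMA R/W path.
 */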
65 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
67 return (conn->pi_support &&
68 cmd->prot_op != TARGET_PROT_NORMAL);
73 isert_qp_event_callback(struct ib_event *e, void *context)
75 struct isert_conn *isert_conn = context;
77 isert_err("%s (%d): conn %p\n",
78 ib_event_msg(e->event), e->event, isert_conn);
81 case IB_EVENT_COMM_EST:
82 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
84 case IB_EVENT_QP_LAST_WQE_REACHED:
85 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
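/*
 * Pick the completion context (CQ) with the fewest active QPs so that
 * connections are spread evenly across the device's completion vectors.
 * Serialized by device_list_mutex.
 */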
92 static struct isert_comp *
93 isert_comp_get(struct isert_conn *isert_conn)
95 struct isert_device *device = isert_conn->device;
96 struct isert_comp *comp;
99 mutex_lock(&device_list_mutex);
100 for (i = 0; i < device->comps_used; i++)
101 if (device->comps[i].active_qps <
102 device->comps[min].active_qps)
104 comp = &device->comps[min];
106 mutex_unlock(&device_list_mutex);
108 isert_info("conn %p, using comp %p min_index: %d\n",
109 isert_conn, comp, min);
115 isert_comp_put(struct isert_comp *comp)
117 mutex_lock(&device_list_mutex);
119 mutex_unlock(&device_list_mutex);
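/*
 * Create the RC QP for a new connection. Send and receive work requests
 * share the chosen completion context's CQ, and IB_QP_CREATE_SIGNATURE_EN
 * is requested when the device supports T10-PI offload.
 */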
122 static struct ib_qp *
123 isert_create_qp(struct isert_conn *isert_conn,
124 struct isert_comp *comp,
125 struct rdma_cm_id *cma_id)
127 struct isert_device *device = isert_conn->device;
128 struct ib_qp_init_attr attr;
131 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
132 attr.event_handler = isert_qp_event_callback;
133 attr.qp_context = isert_conn;
134 attr.send_cq = comp->cq;
135 attr.recv_cq = comp->cq;
136 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
137 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
138 attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
139 attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
140 attr.cap.max_recv_sge = 1;
141 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
142 attr.qp_type = IB_QPT_RC;
143 if (device->pi_capable)
144 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
146 ret = rdma_create_qp(cma_id, device->pd, &attr);
		isert_err("rdma_create_qp failed: %d\n", ret);
156 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
158 struct isert_comp *comp;
161 comp = isert_comp_get(isert_conn);
162 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
163 if (IS_ERR(isert_conn->qp)) {
164 ret = PTR_ERR(isert_conn->qp);
170 isert_comp_put(comp);
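/*
 * Allocate and DMA-map the pool of receive descriptors used for inbound
 * iSCSI control PDUs; on a mapping failure, everything mapped so far is
 * unwound before returning.
 */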
175 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
177 struct isert_device *device = isert_conn->device;
178 struct ib_device *ib_dev = device->ib_device;
179 struct iser_rx_desc *rx_desc;
180 struct ib_sge *rx_sg;
184 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
185 sizeof(struct iser_rx_desc), GFP_KERNEL);
186 if (!isert_conn->rx_descs)
189 rx_desc = isert_conn->rx_descs;
191 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
192 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
193 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
194 if (ib_dma_mapping_error(ib_dev, dma_addr))
197 rx_desc->dma_addr = dma_addr;
199 rx_sg = &rx_desc->rx_sg;
200 rx_sg->addr = rx_desc->dma_addr;
201 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
202 rx_sg->lkey = device->pd->local_dma_lkey;
203 rx_desc->rx_cqe.done = isert_recv_done;
209 rx_desc = isert_conn->rx_descs;
210 for (j = 0; j < i; j++, rx_desc++) {
211 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
212 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
214 kfree(isert_conn->rx_descs);
215 isert_conn->rx_descs = NULL;
217 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
223 isert_free_rx_descriptors(struct isert_conn *isert_conn)
225 struct ib_device *ib_dev = isert_conn->device->ib_device;
226 struct iser_rx_desc *rx_desc;
229 if (!isert_conn->rx_descs)
232 rx_desc = isert_conn->rx_descs;
233 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
234 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
235 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
238 kfree(isert_conn->rx_descs);
239 isert_conn->rx_descs = NULL;
243 isert_free_comps(struct isert_device *device)
247 for (i = 0; i < device->comps_used; i++) {
248 struct isert_comp *comp = &device->comps[i];
251 ib_free_cq(comp->cq);
253 kfree(device->comps);
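/*
 * Allocate the completion contexts (one CQ each), bounded by ISERT_MAX_CQ,
 * the number of online CPUs and the device's completion vector count.
 */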
257 isert_alloc_comps(struct isert_device *device)
259 int i, max_cqe, ret = 0;
261 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
262 device->ib_device->num_comp_vectors));
	isert_info("Using %d CQs, %s supports %d vectors\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors);
270 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
272 if (!device->comps) {
273 isert_err("Unable to allocate completion contexts\n");
277 max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
279 for (i = 0; i < device->comps_used; i++) {
280 struct isert_comp *comp = &device->comps[i];
282 comp->device = device;
283 comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
285 if (IS_ERR(comp->cq)) {
286 isert_err("Unable to allocate cq\n");
287 ret = PTR_ERR(comp->cq);
295 isert_free_comps(device);
300 isert_create_device_ib_res(struct isert_device *device)
302 struct ib_device *ib_dev = device->ib_device;
305 isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
306 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
308 ret = isert_alloc_comps(device);
312 device->pd = ib_alloc_pd(ib_dev, 0);
313 if (IS_ERR(device->pd)) {
314 ret = PTR_ERR(device->pd);
315 isert_err("failed to allocate pd, device %p, ret=%d\n",
320 /* Check signature cap */
321 device->pi_capable = ib_dev->attrs.device_cap_flags &
322 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
327 isert_free_comps(device);
335 isert_free_device_ib_res(struct isert_device *device)
337 isert_info("device %p\n", device);
339 ib_dealloc_pd(device->pd);
340 isert_free_comps(device);
344 isert_device_put(struct isert_device *device)
346 mutex_lock(&device_list_mutex);
348 isert_info("device %p refcount %d\n", device, device->refcount);
349 if (!device->refcount) {
350 isert_free_device_ib_res(device);
351 list_del(&device->dev_node);
354 mutex_unlock(&device_list_mutex);
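/*
 * Look up the per-IB-device context for this CM id by node GUID, or
 * create a new one; a reference is taken under device_list_mutex.
 */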
357 static struct isert_device *
358 isert_device_get(struct rdma_cm_id *cma_id)
360 struct isert_device *device;
363 mutex_lock(&device_list_mutex);
364 list_for_each_entry(device, &device_list, dev_node) {
365 if (device->ib_device->node_guid == cma_id->device->node_guid) {
367 isert_info("Found iser device %p refcount %d\n",
368 device, device->refcount);
369 mutex_unlock(&device_list_mutex);
374 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
376 mutex_unlock(&device_list_mutex);
377 return ERR_PTR(-ENOMEM);
380 INIT_LIST_HEAD(&device->dev_node);
382 device->ib_device = cma_id->device;
383 ret = isert_create_device_ib_res(device);
386 mutex_unlock(&device_list_mutex);
391 list_add_tail(&device->dev_node, &device_list);
392 isert_info("Created a new iser device %p refcount %d\n",
393 device, device->refcount);
394 mutex_unlock(&device_list_mutex);
400 isert_init_conn(struct isert_conn *isert_conn)
402 isert_conn->state = ISER_CONN_INIT;
403 INIT_LIST_HEAD(&isert_conn->node);
404 init_completion(&isert_conn->login_comp);
405 init_completion(&isert_conn->login_req_comp);
406 init_waitqueue_head(&isert_conn->rem_wait);
407 kref_init(&isert_conn->kref);
408 mutex_init(&isert_conn->mutex);
409 INIT_WORK(&isert_conn->release_work, isert_release_work);
413 isert_free_login_buf(struct isert_conn *isert_conn)
415 struct ib_device *ib_dev = isert_conn->device->ib_device;
417 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
418 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
419 kfree(isert_conn->login_rsp_buf);
421 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
422 ISER_RX_PAYLOAD_SIZE,
424 kfree(isert_conn->login_req_buf);
428 isert_alloc_login_buf(struct isert_conn *isert_conn,
429 struct ib_device *ib_dev)
433 isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
435 if (!isert_conn->login_req_buf) {
		isert_err("Unable to allocate isert_conn->login_req_buf\n");
440 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
441 isert_conn->login_req_buf,
442 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
443 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
445 isert_err("login_req_dma mapping error: %d\n", ret);
446 isert_conn->login_req_dma = 0;
447 goto out_free_login_req_buf;
450 isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
451 if (!isert_conn->login_rsp_buf) {
453 goto out_unmap_login_req_buf;
456 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
457 isert_conn->login_rsp_buf,
458 ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
459 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
461 isert_err("login_rsp_dma mapping error: %d\n", ret);
462 isert_conn->login_rsp_dma = 0;
463 goto out_free_login_rsp_buf;
468 out_free_login_rsp_buf:
469 kfree(isert_conn->login_rsp_buf);
470 out_unmap_login_req_buf:
471 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
472 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
473 out_free_login_req_buf:
474 kfree(isert_conn->login_req_buf);
479 isert_set_nego_params(struct isert_conn *isert_conn,
480 struct rdma_conn_param *param)
482 struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;
484 /* Set max inflight RDMA READ requests */
485 isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
486 attr->max_qp_init_rd_atom);
487 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
489 if (param->private_data) {
490 u8 flags = *(u8 *)param->private_data;
		/*
		 * Use remote invalidation if both the initiator
		 * and the HCA support it.
		 */
496 isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
497 (attr->device_cap_flags &
498 IB_DEVICE_MEM_MGT_EXTENSIONS);
499 if (isert_conn->snd_w_inv)
500 isert_info("Using remote invalidation\n");
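/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate the connection and its
 * login buffers, take a device reference, create the QP, pre-post the
 * login receive and accept the connection. Any failure rolls back and
 * rejects the CM request.
 */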
505 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
507 struct isert_np *isert_np = cma_id->context;
508 struct iscsi_np *np = isert_np->np;
509 struct isert_conn *isert_conn;
510 struct isert_device *device;
513 spin_lock_bh(&np->np_thread_lock);
515 spin_unlock_bh(&np->np_thread_lock);
516 isert_dbg("iscsi_np is not enabled, reject connect request\n");
517 return rdma_reject(cma_id, NULL, 0);
519 spin_unlock_bh(&np->np_thread_lock);
521 isert_dbg("cma_id: %p, portal: %p\n",
522 cma_id, cma_id->context);
524 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
528 isert_init_conn(isert_conn);
529 isert_conn->cm_id = cma_id;
531 ret = isert_alloc_login_buf(isert_conn, cma_id->device);
535 device = isert_device_get(cma_id);
536 if (IS_ERR(device)) {
537 ret = PTR_ERR(device);
538 goto out_rsp_dma_map;
540 isert_conn->device = device;
542 isert_set_nego_params(isert_conn, &event->param.conn);
544 ret = isert_conn_setup_qp(isert_conn, cma_id);
548 ret = isert_login_post_recv(isert_conn);
552 ret = isert_rdma_accept(isert_conn);
556 mutex_lock(&isert_np->mutex);
557 list_add_tail(&isert_conn->node, &isert_np->accepted);
558 mutex_unlock(&isert_np->mutex);
563 isert_device_put(device);
565 isert_free_login_buf(isert_conn);
568 rdma_reject(cma_id, NULL, 0);
573 isert_connect_release(struct isert_conn *isert_conn)
575 struct isert_device *device = isert_conn->device;
577 isert_dbg("conn %p\n", isert_conn);
581 isert_free_rx_descriptors(isert_conn);
582 if (isert_conn->cm_id &&
583 !isert_conn->dev_removed)
584 rdma_destroy_id(isert_conn->cm_id);
586 if (isert_conn->qp) {
587 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;
589 isert_comp_put(comp);
590 ib_destroy_qp(isert_conn->qp);
593 if (isert_conn->login_req_buf)
594 isert_free_login_buf(isert_conn);
596 isert_device_put(device);
598 if (isert_conn->dev_removed)
599 wake_up_interruptible(&isert_conn->rem_wait);
605 isert_connected_handler(struct rdma_cm_id *cma_id)
607 struct isert_conn *isert_conn = cma_id->qp->qp_context;
608 struct isert_np *isert_np = cma_id->context;
610 isert_info("conn %p\n", isert_conn);
612 mutex_lock(&isert_conn->mutex);
613 isert_conn->state = ISER_CONN_UP;
614 kref_get(&isert_conn->kref);
615 mutex_unlock(&isert_conn->mutex);
617 mutex_lock(&isert_np->mutex);
618 list_move_tail(&isert_conn->node, &isert_np->pending);
619 mutex_unlock(&isert_np->mutex);
621 isert_info("np %p: Allow accept_np to continue\n", isert_np);
626 isert_release_kref(struct kref *kref)
628 struct isert_conn *isert_conn = container_of(kref,
629 struct isert_conn, kref);
631 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
634 isert_connect_release(isert_conn);
638 isert_put_conn(struct isert_conn *isert_conn)
640 kref_put(&isert_conn->kref, isert_release_kref);
644 isert_handle_unbound_conn(struct isert_conn *isert_conn)
646 struct isert_np *isert_np = isert_conn->cm_id->context;
648 mutex_lock(&isert_np->mutex);
649 if (!list_empty(&isert_conn->node)) {
		/*
		 * The iSCSI layer does not know about this connection,
		 * so schedule the cleanup ourselves.
		 */
654 list_del_init(&isert_conn->node);
655 isert_put_conn(isert_conn);
656 queue_work(isert_release_wq, &isert_conn->release_work);
658 mutex_unlock(&isert_np->mutex);
662 * isert_conn_terminate() - Initiate connection termination
663 * @isert_conn: isert connection struct
 * If the connection state is BOUND, move it to TERMINATING and start the
 * teardown sequence (rdma_disconnect). If the connection state is UP,
 * complete the flush as well.
 *
 * This routine must be called with the connection mutex held; the state
 * check at the top makes it safe to call multiple times.
 */
674 isert_conn_terminate(struct isert_conn *isert_conn)
678 if (isert_conn->state >= ISER_CONN_TERMINATING)
681 isert_info("Terminating conn %p state %d\n",
682 isert_conn, isert_conn->state);
683 isert_conn->state = ISER_CONN_TERMINATING;
684 err = rdma_disconnect(isert_conn->cm_id);
686 isert_warn("Failed rdma_disconnect isert_conn %p\n",
691 isert_np_cma_handler(struct isert_np *isert_np,
692 enum rdma_cm_event_type event)
694 isert_dbg("%s (%d): isert np %p\n",
695 rdma_event_msg(event), event, isert_np);
698 case RDMA_CM_EVENT_DEVICE_REMOVAL:
699 isert_np->cm_id = NULL;
701 case RDMA_CM_EVENT_ADDR_CHANGE:
702 isert_np->cm_id = isert_setup_id(isert_np);
703 if (IS_ERR(isert_np->cm_id)) {
704 isert_err("isert np %p setup id failed: %ld\n",
705 isert_np, PTR_ERR(isert_np->cm_id));
706 isert_np->cm_id = NULL;
710 isert_err("isert np %p Unexpected event %d\n",
718 isert_disconnected_handler(struct rdma_cm_id *cma_id,
719 enum rdma_cm_event_type event)
721 struct isert_conn *isert_conn = cma_id->qp->qp_context;
723 mutex_lock(&isert_conn->mutex);
724 switch (isert_conn->state) {
725 case ISER_CONN_TERMINATING:
728 isert_conn_terminate(isert_conn);
729 ib_drain_qp(isert_conn->qp);
730 isert_handle_unbound_conn(isert_conn);
732 case ISER_CONN_BOUND:
733 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
734 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_warn("conn %p terminating in state %d\n",
738 isert_conn, isert_conn->state);
740 mutex_unlock(&isert_conn->mutex);
746 isert_connect_error(struct rdma_cm_id *cma_id)
748 struct isert_conn *isert_conn = cma_id->qp->qp_context;
750 ib_drain_qp(isert_conn->qp);
751 list_del_init(&isert_conn->node);
752 isert_conn->cm_id = NULL;
753 isert_put_conn(isert_conn);
759 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
761 struct isert_np *isert_np = cma_id->context;
762 struct isert_conn *isert_conn;
765 isert_info("%s (%d): status %d id %p np %p\n",
766 rdma_event_msg(event->event), event->event,
767 event->status, cma_id, cma_id->context);
769 if (isert_np->cm_id == cma_id)
770 return isert_np_cma_handler(cma_id->context, event->event);
772 switch (event->event) {
773 case RDMA_CM_EVENT_CONNECT_REQUEST:
774 ret = isert_connect_request(cma_id, event);
776 isert_err("failed handle connect request %d\n", ret);
778 case RDMA_CM_EVENT_ESTABLISHED:
779 isert_connected_handler(cma_id);
781 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
782 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
783 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
784 ret = isert_disconnected_handler(cma_id, event->event);
786 case RDMA_CM_EVENT_DEVICE_REMOVAL:
787 isert_conn = cma_id->qp->qp_context;
788 isert_conn->dev_removed = true;
789 isert_disconnected_handler(cma_id, event->event);
790 wait_event_interruptible(isert_conn->rem_wait,
791 isert_conn->state == ISER_CONN_DOWN);
		/*
		 * Return non-zero from the callback so the RDMA CM layer
		 * destroys the cm_id for us.
		 */
		return 1;
798 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
799 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
800 case RDMA_CM_EVENT_CONNECT_ERROR:
801 ret = isert_connect_error(cma_id);
804 isert_err("Unhandled RDMA CMA event: %d\n", event->event);
812 isert_post_recvm(struct isert_conn *isert_conn, u32 count)
814 struct ib_recv_wr *rx_wr, *rx_wr_failed;
816 struct iser_rx_desc *rx_desc;
818 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
819 rx_desc = &isert_conn->rx_descs[i];
821 rx_wr->wr_cqe = &rx_desc->rx_cqe;
822 rx_wr->sg_list = &rx_desc->rx_sg;
824 rx_wr->next = rx_wr + 1;
827 rx_wr->next = NULL; /* mark end of work requests list */
829 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
832 isert_err("ib_post_recv() failed with ret: %d\n", ret);
838 isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
840 struct ib_recv_wr *rx_wr_failed, rx_wr;
843 rx_wr.wr_cqe = &rx_desc->rx_cqe;
844 rx_wr.sg_list = &rx_desc->rx_sg;
848 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
850 isert_err("ib_post_recv() failed with ret: %d\n", ret);
856 isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
858 struct ib_device *ib_dev = isert_conn->cm_id->device;
859 struct ib_send_wr send_wr, *send_wr_failed;
862 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
863 ISER_HEADERS_LEN, DMA_TO_DEVICE);
865 tx_desc->tx_cqe.done = isert_login_send_done;
868 send_wr.wr_cqe = &tx_desc->tx_cqe;
869 send_wr.sg_list = tx_desc->tx_sg;
870 send_wr.num_sge = tx_desc->num_sge;
871 send_wr.opcode = IB_WR_SEND;
872 send_wr.send_flags = IB_SEND_SIGNALED;
874 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
876 isert_err("ib_post_send() failed, ret: %d\n", ret);
882 __isert_create_send_desc(struct isert_device *device,
883 struct iser_tx_desc *tx_desc)
886 memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
887 tx_desc->iser_header.flags = ISCSI_CTRL;
889 tx_desc->num_sge = 1;
891 if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
892 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
893 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
898 isert_create_send_desc(struct isert_conn *isert_conn,
899 struct isert_cmd *isert_cmd,
900 struct iser_tx_desc *tx_desc)
902 struct isert_device *device = isert_conn->device;
903 struct ib_device *ib_dev = device->ib_device;
905 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
906 ISER_HEADERS_LEN, DMA_TO_DEVICE);
908 __isert_create_send_desc(device, tx_desc);
912 isert_init_tx_hdrs(struct isert_conn *isert_conn,
913 struct iser_tx_desc *tx_desc)
915 struct isert_device *device = isert_conn->device;
916 struct ib_device *ib_dev = device->ib_device;
919 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
920 ISER_HEADERS_LEN, DMA_TO_DEVICE);
921 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
922 isert_err("ib_dma_mapping_error() failed\n");
926 tx_desc->dma_addr = dma_addr;
927 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
928 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
929 tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
931 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
932 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
933 tx_desc->tx_sg[0].lkey);
939 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
940 struct ib_send_wr *send_wr)
942 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
944 tx_desc->tx_cqe.done = isert_send_done;
945 send_wr->wr_cqe = &tx_desc->tx_cqe;
947 if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
948 send_wr->opcode = IB_WR_SEND_WITH_INV;
949 send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
951 send_wr->opcode = IB_WR_SEND;
954 send_wr->sg_list = &tx_desc->tx_sg[0];
955 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
956 send_wr->send_flags = IB_SEND_SIGNALED;
960 isert_login_post_recv(struct isert_conn *isert_conn)
962 struct ib_recv_wr rx_wr, *rx_wr_fail;
966 memset(&sge, 0, sizeof(struct ib_sge));
967 sge.addr = isert_conn->login_req_dma;
968 sge.length = ISER_RX_PAYLOAD_SIZE;
969 sge.lkey = isert_conn->device->pd->local_dma_lkey;
	isert_dbg("Setup sge: addr: %llx length: %d lkey: 0x%08x\n",
972 sge.addr, sge.length, sge.lkey);
974 isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
976 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
977 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
978 rx_wr.sg_list = &sge;
981 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
983 isert_err("ib_post_recv() failed: %d\n", ret);
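/*
 * Send a login response PDU. Once the login completes successfully, the
 * regular RX descriptor pool is allocated and posted and the connection
 * moves to FULL_FEATURE; otherwise another login receive is posted for
 * the next login request.
 */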
989 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
992 struct isert_conn *isert_conn = conn->context;
993 struct isert_device *device = isert_conn->device;
994 struct ib_device *ib_dev = device->ib_device;
995 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
998 __isert_create_send_desc(device, tx_desc);
1000 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
1001 sizeof(struct iscsi_hdr));
1003 isert_init_tx_hdrs(isert_conn, tx_desc);
1006 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
1008 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
1009 length, DMA_TO_DEVICE);
1011 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
1013 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
1014 length, DMA_TO_DEVICE);
1016 tx_dsg->addr = isert_conn->login_rsp_dma;
1017 tx_dsg->length = length;
1018 tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
1019 tx_desc->num_sge = 2;
1021 if (!login->login_failed) {
1022 if (login->login_complete) {
1023 ret = isert_alloc_rx_descriptors(isert_conn);
1027 ret = isert_post_recvm(isert_conn,
1028 ISERT_QP_MAX_RECV_DTOS);
1032 /* Now we are in FULL_FEATURE phase */
1033 mutex_lock(&isert_conn->mutex);
1034 isert_conn->state = ISER_CONN_FULL_FEATURE;
1035 mutex_unlock(&isert_conn->mutex);
1039 ret = isert_login_post_recv(isert_conn);
1044 ret = isert_login_post_send(isert_conn, tx_desc);
1052 isert_rx_login_req(struct isert_conn *isert_conn)
1054 struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
1055 int rx_buflen = isert_conn->login_req_len;
1056 struct iscsi_conn *conn = isert_conn->conn;
1057 struct iscsi_login *login = conn->conn_login;
1060 isert_info("conn %p\n", isert_conn);
1062 WARN_ON_ONCE(!login);
1064 if (login->first_request) {
1065 struct iscsi_login_req *login_req =
1066 (struct iscsi_login_req *)&rx_desc->iscsi_header;
1068 * Setup the initial iscsi_login values from the leading
1069 * login request PDU.
1071 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1072 login->current_stage =
1073 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
1075 login->version_min = login_req->min_version;
1076 login->version_max = login_req->max_version;
1077 memcpy(login->isid, login_req->isid, 6);
1078 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1079 login->init_task_tag = login_req->itt;
1080 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1081 login->cid = be16_to_cpu(login_req->cid);
1082 login->tsih = be16_to_cpu(login_req->tsih);
1085 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1087 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1088 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1089 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1090 MAX_KEY_VALUE_PAIRS);
1091 memcpy(login->req_buf, &rx_desc->data[0], size);
1093 if (login->first_request) {
1094 complete(&isert_conn->login_comp);
1097 schedule_delayed_work(&conn->login_work, 0);
1100 static struct iscsi_cmd
1101 *isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
1103 struct isert_conn *isert_conn = conn->context;
1104 struct isert_cmd *isert_cmd;
1105 struct iscsi_cmd *cmd;
1107 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1109 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1112 isert_cmd = iscsit_priv_cmd(cmd);
1113 isert_cmd->conn = isert_conn;
1114 isert_cmd->iscsi_cmd = cmd;
1115 isert_cmd->rx_desc = rx_desc;
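/*
 * Handle an inbound SCSI command PDU, copying any immediate data from the
 * receive descriptor into the command's scatterlist before the command is
 * passed on to the iSCSI core for sequencing.
 */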
1121 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1122 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1123 struct iser_rx_desc *rx_desc, unsigned char *buf)
1125 struct iscsi_conn *conn = isert_conn->conn;
1126 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1127 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1128 bool dump_payload = false;
1129 unsigned int data_len;
1131 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1135 imm_data = cmd->immediate_data;
1136 imm_data_len = cmd->first_burst_len;
1137 unsol_data = cmd->unsolicited_data;
1138 data_len = cmd->se_cmd.data_length;
1140 if (imm_data && imm_data_len == data_len)
1141 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1142 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1145 } else if (rc > 0) {
1146 dump_payload = true;
1153 if (imm_data_len != data_len) {
1154 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1155 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
1156 &rx_desc->data[0], imm_data_len);
1157 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
1158 sg_nents, imm_data_len);
1160 sg_init_table(&isert_cmd->sg, 1);
1161 cmd->se_cmd.t_data_sg = &isert_cmd->sg;
1162 cmd->se_cmd.t_data_nents = 1;
1163 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
1164 isert_dbg("Transfer Immediate imm_data_len: %d\n",
1168 cmd->write_data_done += imm_data_len;
1170 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1171 spin_lock_bh(&cmd->istate_lock);
1172 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1173 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1174 spin_unlock_bh(&cmd->istate_lock);
1178 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1180 if (!rc && dump_payload == false && unsol_data)
1181 iscsit_set_unsoliticed_dataout(cmd);
1182 else if (dump_payload && imm_data)
1183 target_put_sess_cmd(&cmd->se_cmd);
1189 isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1190 struct iser_rx_desc *rx_desc, unsigned char *buf)
1192 struct scatterlist *sg_start;
1193 struct iscsi_conn *conn = isert_conn->conn;
1194 struct iscsi_cmd *cmd = NULL;
1195 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1196 u32 unsol_data_len = ntoh24(hdr->dlength);
1197 int rc, sg_nents, sg_off, page_off;
1199 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1205 * FIXME: Unexpected unsolicited_data out
1207 if (!cmd->unsolicited_data) {
1208 isert_err("Received unexpected solicited data payload\n");
1213 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1214 "write_data_done: %u, data_length: %u\n",
1215 unsol_data_len, cmd->write_data_done,
1216 cmd->se_cmd.data_length);
1218 sg_off = cmd->write_data_done / PAGE_SIZE;
1219 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1220 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1221 page_off = cmd->write_data_done % PAGE_SIZE;
1223 * FIXME: Non page-aligned unsolicited_data out
1226 isert_err("unexpected non-page aligned data payload\n");
1230 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1231 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1232 sg_nents, &rx_desc->data[0], unsol_data_len);
1234 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1237 rc = iscsit_check_dataout_payload(cmd, hdr, false);
	/*
	 * Multiple data-outs for the same command can arrive, so post the
	 * receive buffer beforehand.
	 */
1245 rc = isert_post_recv(isert_conn, rx_desc);
1247 isert_err("ib_post_recv failed with %d\n", rc);
1254 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1255 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1258 struct iscsi_conn *conn = isert_conn->conn;
1259 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1262 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1266 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1269 return iscsit_process_nop_out(conn, cmd, hdr);
1273 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1274 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1275 struct iscsi_text *hdr)
1277 struct iscsi_conn *conn = isert_conn->conn;
1278 u32 payload_length = ntoh24(hdr->dlength);
1280 unsigned char *text_in = NULL;
1282 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1286 if (payload_length) {
1287 text_in = kzalloc(payload_length, GFP_KERNEL);
1289 isert_err("Unable to allocate text_in of payload_length: %u\n",
1294 cmd->text_in_ptr = text_in;
1296 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1298 return iscsit_process_text_cmd(conn, cmd, hdr);
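/*
 * Dispatch a received iSCSI PDU by opcode. The read/write STags and
 * virtual addresses recovered from the iSER header are stashed in the
 * isert command so later RDMA READ/WRITE operations (and remote
 * invalidation) can use them.
 */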
1302 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1303 uint32_t read_stag, uint64_t read_va,
1304 uint32_t write_stag, uint64_t write_va)
1306 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1307 struct iscsi_conn *conn = isert_conn->conn;
1308 struct iscsi_cmd *cmd;
1309 struct isert_cmd *isert_cmd;
1311 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1313 if (conn->sess->sess_ops->SessionType &&
1314 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1315 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1316 " ignoring\n", opcode);
1321 case ISCSI_OP_SCSI_CMD:
1322 cmd = isert_allocate_cmd(conn, rx_desc);
1326 isert_cmd = iscsit_priv_cmd(cmd);
1327 isert_cmd->read_stag = read_stag;
1328 isert_cmd->read_va = read_va;
1329 isert_cmd->write_stag = write_stag;
1330 isert_cmd->write_va = write_va;
1331 isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;
1333 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1334 rx_desc, (unsigned char *)hdr);
1336 case ISCSI_OP_NOOP_OUT:
1337 cmd = isert_allocate_cmd(conn, rx_desc);
1341 isert_cmd = iscsit_priv_cmd(cmd);
1342 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1343 rx_desc, (unsigned char *)hdr);
1345 case ISCSI_OP_SCSI_DATA_OUT:
1346 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1347 (unsigned char *)hdr);
1349 case ISCSI_OP_SCSI_TMFUNC:
1350 cmd = isert_allocate_cmd(conn, rx_desc);
1354 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1355 (unsigned char *)hdr);
1357 case ISCSI_OP_LOGOUT:
1358 cmd = isert_allocate_cmd(conn, rx_desc);
1362 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1365 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
1366 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1368 cmd = isert_allocate_cmd(conn, rx_desc);
1373 isert_cmd = iscsit_priv_cmd(cmd);
1374 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1375 rx_desc, (struct iscsi_text *)hdr);
1378 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1387 isert_print_wc(struct ib_wc *wc, const char *type)
1389 if (wc->status != IB_WC_WR_FLUSH_ERR)
1390 isert_err("%s failure: %s (%d) vend_err %x\n", type,
1391 ib_wc_status_msg(wc->status), wc->status,
1394 isert_dbg("%s failure: %s (%d)\n", type,
1395 ib_wc_status_msg(wc->status), wc->status);
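/*
 * Receive completion handler: sync the descriptor for the CPU, decode the
 * iSER header (the RSV/WSV flags advertise the initiator's read/write
 * STags) and hand the embedded iSCSI PDU to isert_rx_opcode().
 */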
1399 isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1401 struct isert_conn *isert_conn = wc->qp->qp_context;
1402 struct ib_device *ib_dev = isert_conn->cm_id->device;
1403 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
1404 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1405 struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
1406 uint64_t read_va = 0, write_va = 0;
1407 uint32_t read_stag = 0, write_stag = 0;
1409 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1410 isert_print_wc(wc, "recv");
1411 if (wc->status != IB_WC_WR_FLUSH_ERR)
1412 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1416 ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
1417 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1419 isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1420 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
1421 (int)(wc->byte_len - ISER_HEADERS_LEN));
1423 switch (iser_ctrl->flags & 0xF0) {
1425 if (iser_ctrl->flags & ISER_RSV) {
1426 read_stag = be32_to_cpu(iser_ctrl->read_stag);
1427 read_va = be64_to_cpu(iser_ctrl->read_va);
1428 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1429 read_stag, (unsigned long long)read_va);
1431 if (iser_ctrl->flags & ISER_WSV) {
1432 write_stag = be32_to_cpu(iser_ctrl->write_stag);
1433 write_va = be64_to_cpu(iser_ctrl->write_va);
1434 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1435 write_stag, (unsigned long long)write_va);
1438 isert_dbg("ISER ISCSI_CTRL PDU\n");
1441 isert_err("iSER Hello message\n");
1444 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
1448 isert_rx_opcode(isert_conn, rx_desc,
1449 read_stag, read_va, write_stag, write_va);
1451 ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
1452 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1456 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1458 struct isert_conn *isert_conn = wc->qp->qp_context;
1459 struct ib_device *ib_dev = isert_conn->device->ib_device;
1461 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1462 isert_print_wc(wc, "login recv");
1466 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
1467 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1469 isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
1471 if (isert_conn->conn) {
1472 struct iscsi_login *login = isert_conn->conn->conn_login;
1474 if (login && !login->first_request)
1475 isert_rx_login_req(isert_conn);
1478 mutex_lock(&isert_conn->mutex);
1479 complete(&isert_conn->login_req_comp);
1480 mutex_unlock(&isert_conn->mutex);
1482 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
1483 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
1487 isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
1489 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
1490 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
1492 if (!cmd->rw.nr_ops)
1495 if (isert_prot_cmd(conn, se_cmd)) {
1496 rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
1497 conn->cm_id->port_num, se_cmd->t_data_sg,
1498 se_cmd->t_data_nents, se_cmd->t_prot_sg,
1499 se_cmd->t_prot_nents, dir);
1501 rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
1502 se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
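/*
 * Final put of an isert command: unlink it from the connection list,
 * release any RDMA R/W context and hand the command back to the iSCSI/SE
 * core, with extra care for WRITE_PENDING commands during a completion
 * error.
 */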
1509 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1511 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1512 struct isert_conn *isert_conn = isert_cmd->conn;
1513 struct iscsi_conn *conn = isert_conn->conn;
1514 struct iscsi_text_rsp *hdr;
1516 isert_dbg("Cmd %p\n", isert_cmd);
1518 switch (cmd->iscsi_opcode) {
1519 case ISCSI_OP_SCSI_CMD:
1520 spin_lock_bh(&conn->cmd_lock);
1521 if (!list_empty(&cmd->i_conn_node))
1522 list_del_init(&cmd->i_conn_node);
1523 spin_unlock_bh(&conn->cmd_lock);
1525 if (cmd->data_direction == DMA_TO_DEVICE) {
1526 iscsit_stop_dataout_timer(cmd);
1528 * Check for special case during comp_err where
1529 * WRITE_PENDING has been handed off from core,
1530 * but requires an extra target_put_sess_cmd()
1531 * before transport_generic_free_cmd() below.
1534 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1535 struct se_cmd *se_cmd = &cmd->se_cmd;
1537 target_put_sess_cmd(se_cmd);
1541 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1542 transport_generic_free_cmd(&cmd->se_cmd, 0);
1544 case ISCSI_OP_SCSI_TMFUNC:
1545 spin_lock_bh(&conn->cmd_lock);
1546 if (!list_empty(&cmd->i_conn_node))
1547 list_del_init(&cmd->i_conn_node);
1548 spin_unlock_bh(&conn->cmd_lock);
1550 transport_generic_free_cmd(&cmd->se_cmd, 0);
1552 case ISCSI_OP_REJECT:
1553 case ISCSI_OP_NOOP_OUT:
1555 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1556 /* If the continue bit is on, keep the command alive */
1557 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1560 spin_lock_bh(&conn->cmd_lock);
1561 if (!list_empty(&cmd->i_conn_node))
1562 list_del_init(&cmd->i_conn_node);
1563 spin_unlock_bh(&conn->cmd_lock);
1566 * Handle special case for REJECT when iscsi_add_reject*() has
1567 * overwritten the original iscsi_opcode assignment, and the
1568 * associated cmd->se_cmd needs to be released.
1570 if (cmd->se_cmd.se_tfo != NULL) {
1571 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1573 transport_generic_free_cmd(&cmd->se_cmd, 0);
1580 iscsit_release_cmd(cmd);
1586 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1588 if (tx_desc->dma_addr != 0) {
1589 isert_dbg("unmap single for tx_desc->dma_addr\n");
1590 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1591 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1592 tx_desc->dma_addr = 0;
1597 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1598 struct ib_device *ib_dev, bool comp_err)
1600 if (isert_cmd->pdu_buf_dma != 0) {
1601 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1602 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1603 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1604 isert_cmd->pdu_buf_dma = 0;
1607 isert_unmap_tx_desc(tx_desc, ib_dev);
1608 isert_put_cmd(isert_cmd, comp_err);
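/*
 * Query the signature MR after a protected transfer and translate a
 * guard/ref-tag/app-tag failure into the matching TCM sense code plus the
 * offending sector.
 */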
1612 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1614 struct ib_mr_status mr_status;
1617 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1619 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1620 goto fail_mr_status;
1623 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1625 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1627 switch (mr_status.sig_err.err_type) {
1628 case IB_SIG_BAD_GUARD:
1629 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1631 case IB_SIG_BAD_REFTAG:
1632 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1634 case IB_SIG_BAD_APPTAG:
1635 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1638 sec_offset_err = mr_status.sig_err.sig_err_offset;
1639 do_div(sec_offset_err, block_size);
1640 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1642 isert_err("PI error found type %d at sector 0x%llx "
1643 "expected 0x%x vs actual 0x%x\n",
1644 mr_status.sig_err.err_type,
1645 (unsigned long long)se_cmd->bad_sector,
1646 mr_status.sig_err.expected,
1647 mr_status.sig_err.actual);
1656 isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
1658 struct isert_conn *isert_conn = wc->qp->qp_context;
1659 struct isert_device *device = isert_conn->device;
1660 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1661 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1662 struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
1665 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1666 isert_print_wc(wc, "rdma write");
1667 if (wc->status != IB_WC_WR_FLUSH_ERR)
1668 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1669 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1673 isert_dbg("Cmd %p\n", isert_cmd);
1675 ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
1676 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1679 transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
1681 isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
1685 isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1687 struct isert_conn *isert_conn = wc->qp->qp_context;
1688 struct isert_device *device = isert_conn->device;
1689 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
1690 struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
1691 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1692 struct se_cmd *se_cmd = &cmd->se_cmd;
1695 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1696 isert_print_wc(wc, "rdma read");
1697 if (wc->status != IB_WC_WR_FLUSH_ERR)
1698 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1699 isert_completion_put(desc, isert_cmd, device->ib_device, true);
1703 isert_dbg("Cmd %p\n", isert_cmd);
1705 iscsit_stop_dataout_timer(cmd);
1707 if (isert_prot_cmd(isert_conn, se_cmd))
1708 ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
1709 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1710 cmd->write_data_done = 0;
1712 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1713 spin_lock_bh(&cmd->istate_lock);
1714 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1715 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1716 spin_unlock_bh(&cmd->istate_lock);
1719 target_put_sess_cmd(se_cmd);
1720 transport_send_check_condition_and_sense(se_cmd,
1723 target_execute_cmd(se_cmd);
1728 isert_do_control_comp(struct work_struct *work)
1730 struct isert_cmd *isert_cmd = container_of(work,
1731 struct isert_cmd, comp_work);
1732 struct isert_conn *isert_conn = isert_cmd->conn;
1733 struct ib_device *ib_dev = isert_conn->cm_id->device;
1734 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1736 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
1738 switch (cmd->i_state) {
1739 case ISTATE_SEND_TASKMGTRSP:
1740 iscsit_tmr_post_handler(cmd, cmd->conn);
1741 case ISTATE_SEND_REJECT: /* FALLTHRU */
1742 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
1743 cmd->i_state = ISTATE_SENT_STATUS;
1744 isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
1747 case ISTATE_SEND_LOGOUTRSP:
1748 iscsit_logout_post_handler(cmd, cmd->conn);
1751 isert_err("Unknown i_state %d\n", cmd->i_state);
1758 isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
1760 struct isert_conn *isert_conn = wc->qp->qp_context;
1761 struct ib_device *ib_dev = isert_conn->cm_id->device;
1762 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1764 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1765 isert_print_wc(wc, "login send");
1766 if (wc->status != IB_WC_WR_FLUSH_ERR)
1767 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1770 isert_unmap_tx_desc(tx_desc, ib_dev);
1774 isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
1776 struct isert_conn *isert_conn = wc->qp->qp_context;
1777 struct ib_device *ib_dev = isert_conn->cm_id->device;
1778 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
1779 struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
1781 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1782 isert_print_wc(wc, "send");
1783 if (wc->status != IB_WC_WR_FLUSH_ERR)
1784 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1785 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
1789 isert_dbg("Cmd %p\n", isert_cmd);
1791 switch (isert_cmd->iscsi_cmd->i_state) {
1792 case ISTATE_SEND_TASKMGTRSP:
1793 case ISTATE_SEND_LOGOUTRSP:
1794 case ISTATE_SEND_REJECT:
1795 case ISTATE_SEND_TEXTRSP:
1796 isert_unmap_tx_desc(tx_desc, ib_dev);
1798 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1799 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1802 isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
1803 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1809 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1811 struct ib_send_wr *wr_failed;
1814 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
1816 isert_err("ib_post_recv failed with %d\n", ret);
1820 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
1823 isert_err("ib_post_send failed with %d\n", ret);
1830 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1832 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1833 struct isert_conn *isert_conn = conn->context;
1834 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1835 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
1836 &isert_cmd->tx_desc.iscsi_header;
1838 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1839 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
1840 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1842 * Attach SENSE DATA payload to iSCSI Response PDU
1844 if (cmd->se_cmd.sense_buffer &&
1845 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
1846 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
1847 struct isert_device *device = isert_conn->device;
1848 struct ib_device *ib_dev = device->ib_device;
1849 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1850 u32 padding, pdu_len;
1852 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
1854 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
1856 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
1857 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
1858 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
1860 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1861 (void *)cmd->sense_buffer, pdu_len,
1864 isert_cmd->pdu_buf_len = pdu_len;
1865 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1866 tx_dsg->length = pdu_len;
1867 tx_dsg->lkey = device->pd->local_dma_lkey;
1868 isert_cmd->tx_desc.num_sge = 2;
1871 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1873 isert_dbg("Posting SCSI Response\n");
1875 return isert_post_response(isert_conn, isert_cmd);
1879 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
1881 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1882 struct isert_conn *isert_conn = conn->context;
1884 spin_lock_bh(&conn->cmd_lock);
1885 if (!list_empty(&cmd->i_conn_node))
1886 list_del_init(&cmd->i_conn_node);
1887 spin_unlock_bh(&conn->cmd_lock);
1889 if (cmd->data_direction == DMA_TO_DEVICE)
1890 iscsit_stop_dataout_timer(cmd);
1891 isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
1894 static enum target_prot_op
1895 isert_get_sup_prot_ops(struct iscsi_conn *conn)
1897 struct isert_conn *isert_conn = conn->context;
1898 struct isert_device *device = isert_conn->device;
1900 if (conn->tpg->tpg_attrib.t10_pi) {
1901 if (device->pi_capable) {
1902 isert_info("conn %p PI offload enabled\n", isert_conn);
1903 isert_conn->pi_support = true;
1904 return TARGET_PROT_ALL;
1908 isert_info("conn %p PI offload disabled\n", isert_conn);
1909 isert_conn->pi_support = false;
1911 return TARGET_PROT_NORMAL;
1915 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1916 bool nopout_response)
1918 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1919 struct isert_conn *isert_conn = conn->context;
1920 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1922 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1923 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1924 &isert_cmd->tx_desc.iscsi_header,
1926 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1927 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1929 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
1931 return isert_post_response(isert_conn, isert_cmd);
1935 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1937 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1938 struct isert_conn *isert_conn = conn->context;
1939 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1941 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1942 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
1943 &isert_cmd->tx_desc.iscsi_header);
1944 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1945 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1947 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
1949 return isert_post_response(isert_conn, isert_cmd);
1953 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1955 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1956 struct isert_conn *isert_conn = conn->context;
1957 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1959 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1960 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
1961 &isert_cmd->tx_desc.iscsi_header);
1962 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1963 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1965 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
1967 return isert_post_response(isert_conn, isert_cmd);
1971 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1973 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1974 struct isert_conn *isert_conn = conn->context;
1975 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1976 struct isert_device *device = isert_conn->device;
1977 struct ib_device *ib_dev = device->ib_device;
1978 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
1979 struct iscsi_reject *hdr =
1980 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
1982 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1983 iscsit_build_reject(cmd, conn, hdr);
1984 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1986 hton24(hdr->dlength, ISCSI_HDR_LEN);
1987 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
1988 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
1990 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
1991 tx_dsg->addr = isert_cmd->pdu_buf_dma;
1992 tx_dsg->length = ISCSI_HDR_LEN;
1993 tx_dsg->lkey = device->pd->local_dma_lkey;
1994 isert_cmd->tx_desc.num_sge = 2;
1996 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
1998 isert_dbg("conn %p Posting Reject\n", isert_conn);
2000 return isert_post_response(isert_conn, isert_cmd);
2004 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2006 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2007 struct isert_conn *isert_conn = conn->context;
2008 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2009 struct iscsi_text_rsp *hdr =
2010 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2014 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2015 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
2020 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2023 struct isert_device *device = isert_conn->device;
2024 struct ib_device *ib_dev = device->ib_device;
2025 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2026 void *txt_rsp_buf = cmd->buf_ptr;
2028 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2029 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2031 isert_cmd->pdu_buf_len = txt_rsp_len;
2032 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2033 tx_dsg->length = txt_rsp_len;
2034 tx_dsg->lkey = device->pd->local_dma_lkey;
2035 isert_cmd->tx_desc.num_sge = 2;
2037 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2039 isert_dbg("conn %p Text Response\n", isert_conn);
2041 return isert_post_response(isert_conn, isert_cmd);
2045 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2046 struct ib_sig_domain *domain)
2048 domain->sig_type = IB_SIG_TYPE_T10_DIF;
2049 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2050 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2051 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * These values are hard-coded for now; if the target core starts
	 * supplying them in the future, take them from se_cmd instead.
	 */
2057 domain->sig.dif.apptag_check_mask = 0xffff;
2058 domain->sig.dif.app_escape = true;
2059 domain->sig.dif.ref_escape = true;
2060 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2061 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2062 domain->sig.dif.ref_remap = true;
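/*
 * Translate the target-core protection operation into ib_sig_attrs:
 * INSERT/STRIP protect a single side (wire or memory), PASS protects
 * both, and check_mask mirrors the requested guard/app/ref tag checks.
 */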
2066 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2068 memset(sig_attrs, 0, sizeof(*sig_attrs));
2070 switch (se_cmd->prot_op) {
2071 case TARGET_PROT_DIN_INSERT:
2072 case TARGET_PROT_DOUT_STRIP:
2073 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2074 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2076 case TARGET_PROT_DOUT_INSERT:
2077 case TARGET_PROT_DIN_STRIP:
2078 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2079 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2081 case TARGET_PROT_DIN_PASS:
2082 case TARGET_PROT_DOUT_PASS:
2083 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2084 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2087 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2091 sig_attrs->check_mask =
2092 (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2093 (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
2094 (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
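/*
 * Initialize (once per command) and post the rdma_rw context that moves
 * this command's data, using the signature variant when T10-PI offload
 * is in effect for the command.
 */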
2099 isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
2100 struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
2102 struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
2103 enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
2104 u8 port_num = conn->cm_id->port_num;
2109 if (cmd->ctx_init_done)
2112 if (dir == DMA_FROM_DEVICE) {
2113 addr = cmd->write_va;
2114 rkey = cmd->write_stag;
2115 offset = cmd->iscsi_cmd->write_data_done;
2117 addr = cmd->read_va;
2118 rkey = cmd->read_stag;
2122 if (isert_prot_cmd(conn, se_cmd)) {
2123 struct ib_sig_attrs sig_attrs;
2125 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2129 WARN_ON_ONCE(offset);
2130 ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
2131 se_cmd->t_data_sg, se_cmd->t_data_nents,
2132 se_cmd->t_prot_sg, se_cmd->t_prot_nents,
2133 &sig_attrs, addr, rkey, dir);
2135 ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
2136 se_cmd->t_data_sg, se_cmd->t_data_nents,
2137 offset, addr, rkey, dir);
2141 isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
2145 cmd->ctx_init_done = true;
2148 ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
2150 isert_err("Cmd: %p failed to post RDMA res\n", cmd);
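/*
 * Queue the RDMA WRITE(s) that carry READ data to the initiator. Without
 * PI offload, the SCSI response PDU is chained behind the last WRITE so a
 * single send completion covers both.
 */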
2155 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2157 struct se_cmd *se_cmd = &cmd->se_cmd;
2158 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2159 struct isert_conn *isert_conn = conn->context;
2160 struct ib_cqe *cqe = NULL;
2161 struct ib_send_wr *chain_wr = NULL;
2164 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2165 isert_cmd, se_cmd->data_length);
2167 if (isert_prot_cmd(isert_conn, se_cmd)) {
2168 isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
2169 cqe = &isert_cmd->tx_desc.tx_cqe;
		/*
		 * Build the iSCSI response PDU in isert_cmd->tx_desc and
		 * chain it behind the RDMA WRITE so the response goes out
		 * once the data transfer completes.
		 */
2174 isert_create_send_desc(isert_conn, isert_cmd,
2175 &isert_cmd->tx_desc);
2176 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2177 &isert_cmd->tx_desc.iscsi_header);
2178 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2179 isert_init_send_wr(isert_conn, isert_cmd,
2180 &isert_cmd->tx_desc.send_wr);
2182 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2184 isert_err("ib_post_recv failed with %d\n", rc);
2188 chain_wr = &isert_cmd->tx_desc.send_wr;
2191 isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
2192 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);

	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
	isert_rdma_rw_ctx_post(isert_cmd, conn->context,
			       &isert_cmd->tx_desc.tx_cqe, NULL);

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);
	return 0;
}
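/*
 * iscsit queue callbacks: dispatch immediate-queue and response-queue
 * states to the matching isert_put_*() helpers.
 */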
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX
		 * thread context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
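/*
 * Create, bind and listen on an RDMA CM ID for this network portal;
 * connection events are delivered to isert_cma_handler().
 */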
static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
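/*
 * ->iscsit_setup_np: allocate the per-portal isert_np, record the portal
 * address in np->np_sockaddr and start listening for RDMA connections.
 */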
static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from the
	 * iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;
	struct iser_cm_hdr rsp_hdr;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	memset(&rsp_hdr, 0, sizeof(rsp_hdr));
	rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
	if (!isert_conn->snd_w_inv)
		rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
	cp.private_data = (void *)&rsp_hdr;
	cp.private_data_len = sizeof(rsp_hdr);

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}
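/*
 * ->iscsit_get_login_rx: wait until the next login request PDU has been
 * received.  Only the first request is processed synchronously here;
 * later requests are handled by isert_rx_login_req() from the receive
 * path, as described in the comment below.
 */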
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}
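/*
 * ->iscsit_accept_np: sleep on isert_np->sem until the CM connect-request
 * path queues a new connection on the pending list, then bind it to the
 * iscsi_conn that the login thread is setting up.
 */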
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: We have no good way to ensure that there are no hanging
	 * connections here that completed RDMA establishment but never
	 * started the iSCSI login process, so work around this by cleaning
	 * up whatever piled up on the accepted and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending, node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted, node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}
static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited data-out
 * @conn:    iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * data-out messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}
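/*
 * Connection teardown: terminate the RDMA connection, drain the QP so no
 * completions race with cleanup, drop commands still waiting for
 * unsolicited Data-Out, then wait for outstanding se_cmds and any posted
 * logout response before handing the final release to isert_release_wq.
 */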
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}
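/*
 * iSER does not run its own RX PDU loop; the RX thread simply parks on a
 * private completion that is never completed, so this returns only when
 * the thread is interrupted (e.g. at connection shutdown).
 */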
static void isert_get_rx_pdu(struct iscsi_conn *conn)
{
	struct completion comp;

	init_completion(&comp);

	wait_for_completion_interruptible(&comp);
}
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.rdma_shutdown		= true,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_rx_pdu	= isert_get_rx_pdu,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
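/*
 * Module init: allocate the completion and release workqueues, then
 * register iser_target_transport with the iscsi-target core.
 */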
static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);