2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <linux/lockdep.h>
44 #include <rdma/ib_cache.h>
46 #include <linux/atomic.h>
48 #include <scsi/scsi.h>
49 #include <scsi/scsi_device.h>
50 #include <scsi/scsi_dbg.h>
51 #include <scsi/scsi_tcq.h>
53 #include <scsi/scsi_transport_srp.h>
57 #define DRV_NAME "ib_srp"
58 #define PFX DRV_NAME ": "
59 #define DRV_VERSION "2.0"
60 #define DRV_RELDATE "July 26, 2015"
62 MODULE_AUTHOR("Roland Dreier");
63 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
64 MODULE_LICENSE("Dual BSD/GPL");
65 MODULE_INFO(release_date, DRV_RELDATE);
67 #if !defined(CONFIG_DYNAMIC_DEBUG)
68 #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
69 #define DYNAMIC_DEBUG_BRANCH(descriptor) false
72 static unsigned int srp_sg_tablesize;
73 static unsigned int cmd_sg_entries;
74 static unsigned int indirect_sg_entries;
75 static bool allow_ext_sg;
76 static bool prefer_fr = true;
77 static bool register_always = true;
78 static bool never_register;
79 static int topspin_workarounds = 1;
81 module_param(srp_sg_tablesize, uint, 0444);
82 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
84 module_param(cmd_sg_entries, uint, 0444);
85 MODULE_PARM_DESC(cmd_sg_entries,
86 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
88 module_param(indirect_sg_entries, uint, 0444);
89 MODULE_PARM_DESC(indirect_sg_entries,
90 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
92 module_param(allow_ext_sg, bool, 0444);
93 MODULE_PARM_DESC(allow_ext_sg,
94 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
96 module_param(topspin_workarounds, int, 0444);
97 MODULE_PARM_DESC(topspin_workarounds,
98 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
100 module_param(prefer_fr, bool, 0444);
101 MODULE_PARM_DESC(prefer_fr,
102 "Whether to use fast registration if both FMR and fast registration are supported");
104 module_param(register_always, bool, 0444);
105 MODULE_PARM_DESC(register_always,
106 "Use memory registration even for contiguous memory regions");
108 module_param(never_register, bool, 0444);
109 MODULE_PARM_DESC(never_register, "Never register memory");
111 static const struct kernel_param_ops srp_tmo_ops;
113 static int srp_reconnect_delay = 10;
114 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
116 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
118 static int srp_fast_io_fail_tmo = 15;
119 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
121 MODULE_PARM_DESC(fast_io_fail_tmo,
122 "Number of seconds between the observation of a transport"
123 " layer error and failing all I/O. \"off\" means that this"
124 " functionality is disabled.");
126 static int srp_dev_loss_tmo = 600;
127 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
129 MODULE_PARM_DESC(dev_loss_tmo,
130 "Maximum number of seconds that the SRP transport should"
131 " insulate transport layer errors. After this time has been"
132 " exceeded the SCSI host is removed. Should be"
133 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
134 " if fast_io_fail_tmo has not been set. \"off\" means that"
135 " this functionality is disabled.");
137 static unsigned ch_count;
138 module_param(ch_count, uint, 0444);
139 MODULE_PARM_DESC(ch_count,
140 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
142 static void srp_add_one(struct ib_device *device);
143 static void srp_remove_one(struct ib_device *device, void *client_data);
144 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
145 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
147 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
149 static struct scsi_transport_template *ib_srp_transport_template;
150 static struct workqueue_struct *srp_remove_wq;
152 static struct ib_client srp_client = {
155 .remove = srp_remove_one
158 static struct ib_sa_client srp_sa_client;
160 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
162 int tmo = *(int *)kp->arg;
165 return sprintf(buffer, "%d", tmo);
167 return sprintf(buffer, "off");
170 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
174 res = srp_parse_tmo(&tmo, val);
178 if (kp->arg == &srp_reconnect_delay)
179 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
181 else if (kp->arg == &srp_fast_io_fail_tmo)
182 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
184 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
188 *(int *)kp->arg = tmo;
194 static const struct kernel_param_ops srp_tmo_ops = {
199 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
201 return (struct srp_target_port *) host->hostdata;
204 static const char *srp_target_info(struct Scsi_Host *host)
206 return host_to_target(host)->target_name;
209 static int srp_target_is_topspin(struct srp_target_port *target)
211 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
212 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
214 return topspin_workarounds &&
215 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
216 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
219 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
221 enum dma_data_direction direction)
225 iu = kmalloc(sizeof *iu, gfp_mask);
229 iu->buf = kzalloc(size, gfp_mask);
233 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
235 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
239 iu->direction = direction;
251 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
256 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
262 static void srp_qp_event(struct ib_event *event, void *context)
264 pr_debug("QP event %s (%d)\n",
265 ib_event_msg(event->event), event->event);
268 static int srp_init_qp(struct srp_target_port *target,
271 struct ib_qp_attr *attr;
274 attr = kmalloc(sizeof *attr, GFP_KERNEL);
278 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
279 target->srp_host->port,
280 be16_to_cpu(target->pkey),
285 attr->qp_state = IB_QPS_INIT;
286 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
287 IB_ACCESS_REMOTE_WRITE);
288 attr->port_num = target->srp_host->port;
290 ret = ib_modify_qp(qp, attr,
301 static int srp_new_cm_id(struct srp_rdma_ch *ch)
303 struct srp_target_port *target = ch->target;
304 struct ib_cm_id *new_cm_id;
306 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
308 if (IS_ERR(new_cm_id))
309 return PTR_ERR(new_cm_id);
312 ib_destroy_cm_id(ch->cm_id);
313 ch->cm_id = new_cm_id;
314 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
315 target->srp_host->port))
316 ch->path.rec_type = SA_PATH_REC_TYPE_OPA;
318 ch->path.rec_type = SA_PATH_REC_TYPE_IB;
319 ch->path.sgid = target->sgid;
320 ch->path.dgid = target->orig_dgid;
321 ch->path.pkey = target->pkey;
322 ch->path.service_id = target->service_id;
327 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
329 struct srp_device *dev = target->srp_host->srp_dev;
330 struct ib_fmr_pool_param fmr_param;
332 memset(&fmr_param, 0, sizeof(fmr_param));
333 fmr_param.pool_size = target->mr_pool_size;
334 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
336 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
337 fmr_param.page_shift = ilog2(dev->mr_page_size);
338 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
339 IB_ACCESS_REMOTE_WRITE |
340 IB_ACCESS_REMOTE_READ);
342 return ib_create_fmr_pool(dev->pd, &fmr_param);
346 * srp_destroy_fr_pool() - free the resources owned by a pool
347 * @pool: Fast registration pool to be destroyed.
349 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
352 struct srp_fr_desc *d;
357 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
365 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
366 * @device: IB device to allocate fast registration descriptors for.
367 * @pd: Protection domain associated with the FR descriptors.
368 * @pool_size: Number of descriptors to allocate.
369 * @max_page_list_len: Maximum fast registration work request page list length.
371 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
372 struct ib_pd *pd, int pool_size,
373 int max_page_list_len)
375 struct srp_fr_pool *pool;
376 struct srp_fr_desc *d;
378 int i, ret = -EINVAL;
383 pool = kzalloc(sizeof(struct srp_fr_pool) +
384 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
387 pool->size = pool_size;
388 pool->max_page_list_len = max_page_list_len;
389 spin_lock_init(&pool->lock);
390 INIT_LIST_HEAD(&pool->free_list);
392 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
393 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
398 pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
399 dev_name(&device->dev));
403 list_add_tail(&d->entry, &pool->free_list);
410 srp_destroy_fr_pool(pool);
418 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
419 * @pool: Pool to obtain descriptor from.
421 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
423 struct srp_fr_desc *d = NULL;
426 spin_lock_irqsave(&pool->lock, flags);
427 if (!list_empty(&pool->free_list)) {
428 d = list_first_entry(&pool->free_list, typeof(*d), entry);
431 spin_unlock_irqrestore(&pool->lock, flags);
437 * srp_fr_pool_put() - put an FR descriptor back in the free list
438 * @pool: Pool the descriptor was allocated from.
439 * @desc: Pointer to an array of fast registration descriptor pointers.
440 * @n: Number of descriptors to put back.
442 * Note: The caller must already have queued an invalidation request for
443 * desc->mr->rkey before calling this function.
445 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
451 spin_lock_irqsave(&pool->lock, flags);
452 for (i = 0; i < n; i++)
453 list_add(&desc[i]->entry, &pool->free_list);
454 spin_unlock_irqrestore(&pool->lock, flags);
457 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
459 struct srp_device *dev = target->srp_host->srp_dev;
461 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
462 dev->max_pages_per_mr);
466 * srp_destroy_qp() - destroy an RDMA queue pair
467 * @qp: RDMA queue pair.
469 * Drain the qp before destroying it. This avoids that the receive
470 * completion handler can access the queue pair while it is
473 static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
475 spin_lock_irq(&ch->lock);
476 ib_process_cq_direct(ch->send_cq, -1);
477 spin_unlock_irq(&ch->lock);
483 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
485 struct srp_target_port *target = ch->target;
486 struct srp_device *dev = target->srp_host->srp_dev;
487 struct ib_qp_init_attr *init_attr;
488 struct ib_cq *recv_cq, *send_cq;
490 struct ib_fmr_pool *fmr_pool = NULL;
491 struct srp_fr_pool *fr_pool = NULL;
492 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
495 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
499 /* queue_size + 1 for ib_drain_rq() */
500 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
501 ch->comp_vector, IB_POLL_SOFTIRQ);
502 if (IS_ERR(recv_cq)) {
503 ret = PTR_ERR(recv_cq);
507 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
508 ch->comp_vector, IB_POLL_DIRECT);
509 if (IS_ERR(send_cq)) {
510 ret = PTR_ERR(send_cq);
514 init_attr->event_handler = srp_qp_event;
515 init_attr->cap.max_send_wr = m * target->queue_size;
516 init_attr->cap.max_recv_wr = target->queue_size + 1;
517 init_attr->cap.max_recv_sge = 1;
518 init_attr->cap.max_send_sge = 1;
519 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
520 init_attr->qp_type = IB_QPT_RC;
521 init_attr->send_cq = send_cq;
522 init_attr->recv_cq = recv_cq;
524 qp = ib_create_qp(dev->pd, init_attr);
530 ret = srp_init_qp(target, qp);
534 if (dev->use_fast_reg) {
535 fr_pool = srp_alloc_fr_pool(target);
536 if (IS_ERR(fr_pool)) {
537 ret = PTR_ERR(fr_pool);
538 shost_printk(KERN_WARNING, target->scsi_host, PFX
539 "FR pool allocation failed (%d)\n", ret);
542 } else if (dev->use_fmr) {
543 fmr_pool = srp_alloc_fmr_pool(target);
544 if (IS_ERR(fmr_pool)) {
545 ret = PTR_ERR(fmr_pool);
546 shost_printk(KERN_WARNING, target->scsi_host, PFX
547 "FMR pool allocation failed (%d)\n", ret);
553 srp_destroy_qp(ch, ch->qp);
555 ib_free_cq(ch->recv_cq);
557 ib_free_cq(ch->send_cq);
560 ch->recv_cq = recv_cq;
561 ch->send_cq = send_cq;
563 if (dev->use_fast_reg) {
565 srp_destroy_fr_pool(ch->fr_pool);
566 ch->fr_pool = fr_pool;
567 } else if (dev->use_fmr) {
569 ib_destroy_fmr_pool(ch->fmr_pool);
570 ch->fmr_pool = fmr_pool;
591 * Note: this function may be called without srp_alloc_iu_bufs() having been
592 * invoked. Hence the ch->[rt]x_ring checks.
594 static void srp_free_ch_ib(struct srp_target_port *target,
595 struct srp_rdma_ch *ch)
597 struct srp_device *dev = target->srp_host->srp_dev;
604 ib_destroy_cm_id(ch->cm_id);
608 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
612 if (dev->use_fast_reg) {
614 srp_destroy_fr_pool(ch->fr_pool);
615 } else if (dev->use_fmr) {
617 ib_destroy_fmr_pool(ch->fmr_pool);
620 srp_destroy_qp(ch, ch->qp);
621 ib_free_cq(ch->send_cq);
622 ib_free_cq(ch->recv_cq);
625 * Avoid that the SCSI error handler tries to use this channel after
626 * it has been freed. The SCSI error handler can namely continue
627 * trying to perform recovery actions after scsi_remove_host()
633 ch->send_cq = ch->recv_cq = NULL;
636 for (i = 0; i < target->queue_size; ++i)
637 srp_free_iu(target->srp_host, ch->rx_ring[i]);
642 for (i = 0; i < target->queue_size; ++i)
643 srp_free_iu(target->srp_host, ch->tx_ring[i]);
649 static void srp_path_rec_completion(int status,
650 struct sa_path_rec *pathrec,
653 struct srp_rdma_ch *ch = ch_ptr;
654 struct srp_target_port *target = ch->target;
658 shost_printk(KERN_ERR, target->scsi_host,
659 PFX "Got failed path rec status %d\n", status);
665 static int srp_lookup_path(struct srp_rdma_ch *ch)
667 struct srp_target_port *target = ch->target;
670 ch->path.numb_path = 1;
672 init_completion(&ch->done);
675 * Avoid that the SCSI host can be removed by srp_remove_target()
676 * before srp_path_rec_completion() is called.
678 if (!scsi_host_get(target->scsi_host))
681 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
682 target->srp_host->srp_dev->dev,
683 target->srp_host->port,
685 IB_SA_PATH_REC_SERVICE_ID |
686 IB_SA_PATH_REC_DGID |
687 IB_SA_PATH_REC_SGID |
688 IB_SA_PATH_REC_NUMB_PATH |
690 SRP_PATH_REC_TIMEOUT_MS,
692 srp_path_rec_completion,
693 ch, &ch->path_query);
694 ret = ch->path_query_id;
698 ret = wait_for_completion_interruptible(&ch->done);
704 shost_printk(KERN_WARNING, target->scsi_host,
705 PFX "Path record query failed\n");
708 scsi_host_put(target->scsi_host);
714 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
716 struct srp_target_port *target = ch->target;
718 struct ib_cm_req_param param;
719 struct srp_login_req priv;
723 req = kzalloc(sizeof *req, GFP_KERNEL);
727 req->param.primary_path = &ch->path;
728 req->param.alternate_path = NULL;
729 req->param.service_id = target->service_id;
730 req->param.qp_num = ch->qp->qp_num;
731 req->param.qp_type = ch->qp->qp_type;
732 req->param.private_data = &req->priv;
733 req->param.private_data_len = sizeof req->priv;
734 req->param.flow_control = 1;
736 get_random_bytes(&req->param.starting_psn, 4);
737 req->param.starting_psn &= 0xffffff;
740 * Pick some arbitrary defaults here; we could make these
741 * module parameters if anyone cared about setting them.
743 req->param.responder_resources = 4;
744 req->param.remote_cm_response_timeout = 20;
745 req->param.local_cm_response_timeout = 20;
746 req->param.retry_count = target->tl_retry_count;
747 req->param.rnr_retry_count = 7;
748 req->param.max_cm_retries = 15;
750 req->priv.opcode = SRP_LOGIN_REQ;
752 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
753 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
754 SRP_BUF_FORMAT_INDIRECT);
755 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
756 SRP_MULTICHAN_SINGLE);
758 * In the published SRP specification (draft rev. 16a), the
759 * port identifier format is 8 bytes of ID extension followed
760 * by 8 bytes of GUID. Older drafts put the two halves in the
761 * opposite order, so that the GUID comes first.
763 * Targets conforming to these obsolete drafts can be
764 * recognized by the I/O Class they report.
766 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
767 memcpy(req->priv.initiator_port_id,
768 &target->sgid.global.interface_id, 8);
769 memcpy(req->priv.initiator_port_id + 8,
770 &target->initiator_ext, 8);
771 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
772 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
774 memcpy(req->priv.initiator_port_id,
775 &target->initiator_ext, 8);
776 memcpy(req->priv.initiator_port_id + 8,
777 &target->sgid.global.interface_id, 8);
778 memcpy(req->priv.target_port_id, &target->id_ext, 8);
779 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
783 * Topspin/Cisco SRP targets will reject our login unless we
784 * zero out the first 8 bytes of our initiator port ID and set
785 * the second 8 bytes to the local node GUID.
787 if (srp_target_is_topspin(target)) {
788 shost_printk(KERN_DEBUG, target->scsi_host,
789 PFX "Topspin/Cisco initiator port ID workaround "
790 "activated for target GUID %016llx\n",
791 be64_to_cpu(target->ioc_guid));
792 memset(req->priv.initiator_port_id, 0, 8);
793 memcpy(req->priv.initiator_port_id + 8,
794 &target->srp_host->srp_dev->dev->node_guid, 8);
797 status = ib_send_cm_req(ch->cm_id, &req->param);
804 static bool srp_queue_remove_work(struct srp_target_port *target)
806 bool changed = false;
808 spin_lock_irq(&target->lock);
809 if (target->state != SRP_TARGET_REMOVED) {
810 target->state = SRP_TARGET_REMOVED;
813 spin_unlock_irq(&target->lock);
816 queue_work(srp_remove_wq, &target->remove_work);
821 static void srp_disconnect_target(struct srp_target_port *target)
823 struct srp_rdma_ch *ch;
826 /* XXX should send SRP_I_LOGOUT request */
828 for (i = 0; i < target->ch_count; i++) {
830 ch->connected = false;
831 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
832 shost_printk(KERN_DEBUG, target->scsi_host,
833 PFX "Sending CM DREQ failed\n");
838 static void srp_free_req_data(struct srp_target_port *target,
839 struct srp_rdma_ch *ch)
841 struct srp_device *dev = target->srp_host->srp_dev;
842 struct ib_device *ibdev = dev->dev;
843 struct srp_request *req;
849 for (i = 0; i < target->req_ring_size; ++i) {
850 req = &ch->req_ring[i];
851 if (dev->use_fast_reg) {
854 kfree(req->fmr_list);
855 kfree(req->map_page);
857 if (req->indirect_dma_addr) {
858 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
859 target->indirect_size,
862 kfree(req->indirect_desc);
869 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
871 struct srp_target_port *target = ch->target;
872 struct srp_device *srp_dev = target->srp_host->srp_dev;
873 struct ib_device *ibdev = srp_dev->dev;
874 struct srp_request *req;
877 int i, ret = -ENOMEM;
879 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
884 for (i = 0; i < target->req_ring_size; ++i) {
885 req = &ch->req_ring[i];
886 mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
890 if (srp_dev->use_fast_reg) {
891 req->fr_list = mr_list;
893 req->fmr_list = mr_list;
894 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
895 sizeof(void *), GFP_KERNEL);
899 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
900 if (!req->indirect_desc)
903 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
904 target->indirect_size,
906 if (ib_dma_mapping_error(ibdev, dma_addr))
909 req->indirect_dma_addr = dma_addr;
918 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
919 * @shost: SCSI host whose attributes to remove from sysfs.
921 * Note: Any attributes defined in the host template and that did not exist
922 * before invocation of this function will be ignored.
924 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
926 struct device_attribute **attr;
928 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
929 device_remove_file(&shost->shost_dev, *attr);
932 static void srp_remove_target(struct srp_target_port *target)
934 struct srp_rdma_ch *ch;
937 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
939 srp_del_scsi_host_attr(target->scsi_host);
940 srp_rport_get(target->rport);
941 srp_remove_host(target->scsi_host);
942 scsi_remove_host(target->scsi_host);
943 srp_stop_rport_timers(target->rport);
944 srp_disconnect_target(target);
945 for (i = 0; i < target->ch_count; i++) {
947 srp_free_ch_ib(target, ch);
949 cancel_work_sync(&target->tl_err_work);
950 srp_rport_put(target->rport);
951 for (i = 0; i < target->ch_count; i++) {
953 srp_free_req_data(target, ch);
958 spin_lock(&target->srp_host->target_lock);
959 list_del(&target->list);
960 spin_unlock(&target->srp_host->target_lock);
962 scsi_host_put(target->scsi_host);
965 static void srp_remove_work(struct work_struct *work)
967 struct srp_target_port *target =
968 container_of(work, struct srp_target_port, remove_work);
970 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
972 srp_remove_target(target);
975 static void srp_rport_delete(struct srp_rport *rport)
977 struct srp_target_port *target = rport->lld_data;
979 srp_queue_remove_work(target);
983 * srp_connected_ch() - number of connected channels
984 * @target: SRP target port.
986 static int srp_connected_ch(struct srp_target_port *target)
990 for (i = 0; i < target->ch_count; i++)
991 c += target->ch[i].connected;
996 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
998 struct srp_target_port *target = ch->target;
1001 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1003 ret = srp_lookup_path(ch);
1008 init_completion(&ch->done);
1009 ret = srp_send_req(ch, multich);
1012 ret = wait_for_completion_interruptible(&ch->done);
1017 * The CM event handling code will set status to
1018 * SRP_PORT_REDIRECT if we get a port redirect REJ
1019 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1020 * redirect REJ back.
1025 ch->connected = true;
1028 case SRP_PORT_REDIRECT:
1029 ret = srp_lookup_path(ch);
1034 case SRP_DLID_REDIRECT:
1037 case SRP_STALE_CONN:
1038 shost_printk(KERN_ERR, target->scsi_host, PFX
1039 "giving up on stale connection\n");
1049 return ret <= 0 ? ret : -ENODEV;
1052 static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1054 srp_handle_qp_err(cq, wc, "INV RKEY");
1057 static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1060 struct ib_send_wr *bad_wr;
1061 struct ib_send_wr wr = {
1062 .opcode = IB_WR_LOCAL_INV,
1066 .ex.invalidate_rkey = rkey,
1069 wr.wr_cqe = &req->reg_cqe;
1070 req->reg_cqe.done = srp_inv_rkey_err_done;
1071 return ib_post_send(ch->qp, &wr, &bad_wr);
1074 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1075 struct srp_rdma_ch *ch,
1076 struct srp_request *req)
1078 struct srp_target_port *target = ch->target;
1079 struct srp_device *dev = target->srp_host->srp_dev;
1080 struct ib_device *ibdev = dev->dev;
1083 if (!scsi_sglist(scmnd) ||
1084 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1085 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1088 if (dev->use_fast_reg) {
1089 struct srp_fr_desc **pfr;
1091 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1092 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1094 shost_printk(KERN_ERR, target->scsi_host, PFX
1095 "Queueing INV WR for rkey %#x failed (%d)\n",
1096 (*pfr)->mr->rkey, res);
1097 queue_work(system_long_wq,
1098 &target->tl_err_work);
1102 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1104 } else if (dev->use_fmr) {
1105 struct ib_pool_fmr **pfmr;
1107 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1108 ib_fmr_pool_unmap(*pfmr);
1111 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1112 scmnd->sc_data_direction);
1116 * srp_claim_req - Take ownership of the scmnd associated with a request.
1117 * @ch: SRP RDMA channel.
1118 * @req: SRP request.
1119 * @sdev: If not NULL, only take ownership for this SCSI device.
1120 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1121 * ownership of @req->scmnd if it equals @scmnd.
1124 * Either NULL or a pointer to the SCSI command the caller became owner of.
1126 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1127 struct srp_request *req,
1128 struct scsi_device *sdev,
1129 struct scsi_cmnd *scmnd)
1131 unsigned long flags;
1133 spin_lock_irqsave(&ch->lock, flags);
1135 (!sdev || req->scmnd->device == sdev) &&
1136 (!scmnd || req->scmnd == scmnd)) {
1142 spin_unlock_irqrestore(&ch->lock, flags);
1148 * srp_free_req() - Unmap data and adjust ch->req_lim.
1149 * @ch: SRP RDMA channel.
1150 * @req: Request to be freed.
1151 * @scmnd: SCSI command associated with @req.
1152 * @req_lim_delta: Amount to be added to @target->req_lim.
1154 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1155 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1157 unsigned long flags;
1159 srp_unmap_data(scmnd, ch, req);
1161 spin_lock_irqsave(&ch->lock, flags);
1162 ch->req_lim += req_lim_delta;
1163 spin_unlock_irqrestore(&ch->lock, flags);
1166 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1167 struct scsi_device *sdev, int result)
1169 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1172 srp_free_req(ch, req, scmnd, 0);
1173 scmnd->result = result;
1174 scmnd->scsi_done(scmnd);
1178 static void srp_terminate_io(struct srp_rport *rport)
1180 struct srp_target_port *target = rport->lld_data;
1181 struct srp_rdma_ch *ch;
1182 struct Scsi_Host *shost = target->scsi_host;
1183 struct scsi_device *sdev;
1187 * Invoking srp_terminate_io() while srp_queuecommand() is running
1188 * is not safe. Hence the warning statement below.
1190 shost_for_each_device(sdev, shost)
1191 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1193 for (i = 0; i < target->ch_count; i++) {
1194 ch = &target->ch[i];
1196 for (j = 0; j < target->req_ring_size; ++j) {
1197 struct srp_request *req = &ch->req_ring[j];
1199 srp_finish_req(ch, req, NULL,
1200 DID_TRANSPORT_FAILFAST << 16);
1206 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1207 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1208 * srp_reset_device() or srp_reset_host() calls will occur while this function
1209 * is in progress. One way to realize that is not to call this function
1210 * directly but to call srp_reconnect_rport() instead since that last function
1211 * serializes calls of this function via rport->mutex and also blocks
1212 * srp_queuecommand() calls before invoking this function.
1214 static int srp_rport_reconnect(struct srp_rport *rport)
1216 struct srp_target_port *target = rport->lld_data;
1217 struct srp_rdma_ch *ch;
1219 bool multich = false;
1221 srp_disconnect_target(target);
1223 if (target->state == SRP_TARGET_SCANNING)
1227 * Now get a new local CM ID so that we avoid confusing the target in
1228 * case things are really fouled up. Doing so also ensures that all CM
1229 * callbacks will have finished before a new QP is allocated.
1231 for (i = 0; i < target->ch_count; i++) {
1232 ch = &target->ch[i];
1233 ret += srp_new_cm_id(ch);
1235 for (i = 0; i < target->ch_count; i++) {
1236 ch = &target->ch[i];
1237 for (j = 0; j < target->req_ring_size; ++j) {
1238 struct srp_request *req = &ch->req_ring[j];
1240 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1243 for (i = 0; i < target->ch_count; i++) {
1244 ch = &target->ch[i];
1246 * Whether or not creating a new CM ID succeeded, create a new
1247 * QP. This guarantees that all completion callback function
1248 * invocations have finished before request resetting starts.
1250 ret += srp_create_ch_ib(ch);
1252 INIT_LIST_HEAD(&ch->free_tx);
1253 for (j = 0; j < target->queue_size; ++j)
1254 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1257 target->qp_in_error = false;
1259 for (i = 0; i < target->ch_count; i++) {
1260 ch = &target->ch[i];
1263 ret = srp_connect_ch(ch, multich);
1268 shost_printk(KERN_INFO, target->scsi_host,
1269 PFX "reconnect succeeded\n");
1274 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1275 unsigned int dma_len, u32 rkey)
1277 struct srp_direct_buf *desc = state->desc;
1279 WARN_ON_ONCE(!dma_len);
1281 desc->va = cpu_to_be64(dma_addr);
1282 desc->key = cpu_to_be32(rkey);
1283 desc->len = cpu_to_be32(dma_len);
1285 state->total_len += dma_len;
1290 static int srp_map_finish_fmr(struct srp_map_state *state,
1291 struct srp_rdma_ch *ch)
1293 struct srp_target_port *target = ch->target;
1294 struct srp_device *dev = target->srp_host->srp_dev;
1295 struct ib_pd *pd = target->pd;
1296 struct ib_pool_fmr *fmr;
1299 if (state->fmr.next >= state->fmr.end) {
1300 shost_printk(KERN_ERR, ch->target->scsi_host,
1301 PFX "Out of MRs (mr_per_cmd = %d)\n",
1302 ch->target->mr_per_cmd);
1306 WARN_ON_ONCE(!dev->use_fmr);
1308 if (state->npages == 0)
1311 if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
1312 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1313 pd->unsafe_global_rkey);
1317 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1318 state->npages, io_addr);
1320 return PTR_ERR(fmr);
1322 *state->fmr.next++ = fmr;
1325 srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
1326 state->dma_len, fmr->fmr->rkey);
1335 static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1337 srp_handle_qp_err(cq, wc, "FAST REG");
1341 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1342 * where to start in the first element. If sg_offset_p != NULL then
1343 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1344 * byte that has not yet been mapped.
1346 static int srp_map_finish_fr(struct srp_map_state *state,
1347 struct srp_request *req,
1348 struct srp_rdma_ch *ch, int sg_nents,
1349 unsigned int *sg_offset_p)
1351 struct srp_target_port *target = ch->target;
1352 struct srp_device *dev = target->srp_host->srp_dev;
1353 struct ib_pd *pd = target->pd;
1354 struct ib_send_wr *bad_wr;
1355 struct ib_reg_wr wr;
1356 struct srp_fr_desc *desc;
1360 if (state->fr.next >= state->fr.end) {
1361 shost_printk(KERN_ERR, ch->target->scsi_host,
1362 PFX "Out of MRs (mr_per_cmd = %d)\n",
1363 ch->target->mr_per_cmd);
1367 WARN_ON_ONCE(!dev->use_fast_reg);
1369 if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
1370 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1372 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1373 sg_dma_len(state->sg) - sg_offset,
1374 pd->unsafe_global_rkey);
1380 desc = srp_fr_pool_get(ch->fr_pool);
1384 rkey = ib_inc_rkey(desc->mr->rkey);
1385 ib_update_fast_reg_key(desc->mr, rkey);
1387 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1389 if (unlikely(n < 0)) {
1390 srp_fr_pool_put(ch->fr_pool, &desc, 1);
1391 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1392 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1393 sg_offset_p ? *sg_offset_p : -1, n);
1397 WARN_ON_ONCE(desc->mr->length == 0);
1399 req->reg_cqe.done = srp_reg_mr_err_done;
1402 wr.wr.opcode = IB_WR_REG_MR;
1403 wr.wr.wr_cqe = &req->reg_cqe;
1405 wr.wr.send_flags = 0;
1407 wr.key = desc->mr->rkey;
1408 wr.access = (IB_ACCESS_LOCAL_WRITE |
1409 IB_ACCESS_REMOTE_READ |
1410 IB_ACCESS_REMOTE_WRITE);
1412 *state->fr.next++ = desc;
1415 srp_map_desc(state, desc->mr->iova,
1416 desc->mr->length, desc->mr->rkey);
1418 err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
1419 if (unlikely(err)) {
1420 WARN_ON_ONCE(err == -ENOMEM);
1427 static int srp_map_sg_entry(struct srp_map_state *state,
1428 struct srp_rdma_ch *ch,
1429 struct scatterlist *sg)
1431 struct srp_target_port *target = ch->target;
1432 struct srp_device *dev = target->srp_host->srp_dev;
1433 struct ib_device *ibdev = dev->dev;
1434 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1435 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1436 unsigned int len = 0;
1439 WARN_ON_ONCE(!dma_len);
1442 unsigned offset = dma_addr & ~dev->mr_page_mask;
1444 if (state->npages == dev->max_pages_per_mr ||
1445 (state->npages > 0 && offset != 0)) {
1446 ret = srp_map_finish_fmr(state, ch);
1451 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1454 state->base_dma_addr = dma_addr;
1455 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1456 state->dma_len += len;
1462 * If the end of the MR is not on a page boundary then we need to
1463 * close it out and start a new one -- we can only merge at page
1467 if ((dma_addr & ~dev->mr_page_mask) != 0)
1468 ret = srp_map_finish_fmr(state, ch);
1472 static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1473 struct srp_request *req, struct scatterlist *scat,
1476 struct scatterlist *sg;
1479 state->pages = req->map_page;
1480 state->fmr.next = req->fmr_list;
1481 state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
1483 for_each_sg(scat, sg, count, i) {
1484 ret = srp_map_sg_entry(state, ch, sg);
1489 ret = srp_map_finish_fmr(state, ch);
1496 static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1497 struct srp_request *req, struct scatterlist *scat,
1500 unsigned int sg_offset = 0;
1502 state->fr.next = req->fr_list;
1503 state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1512 n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1513 if (unlikely(n < 0))
1517 for (i = 0; i < n; i++)
1518 state->sg = sg_next(state->sg);
1524 static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1525 struct srp_request *req, struct scatterlist *scat,
1528 struct srp_target_port *target = ch->target;
1529 struct srp_device *dev = target->srp_host->srp_dev;
1530 struct scatterlist *sg;
1533 for_each_sg(scat, sg, count, i) {
1534 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1535 ib_sg_dma_len(dev->dev, sg),
1536 target->pd->unsafe_global_rkey);
1543 * Register the indirect data buffer descriptor with the HCA.
1545 * Note: since the indirect data buffer descriptor has been allocated with
1546 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1549 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1550 void **next_mr, void **end_mr, u32 idb_len,
1553 struct srp_target_port *target = ch->target;
1554 struct srp_device *dev = target->srp_host->srp_dev;
1555 struct srp_map_state state;
1556 struct srp_direct_buf idb_desc;
1558 struct scatterlist idb_sg[1];
1561 memset(&state, 0, sizeof(state));
1562 memset(&idb_desc, 0, sizeof(idb_desc));
1563 state.gen.next = next_mr;
1564 state.gen.end = end_mr;
1565 state.desc = &idb_desc;
1566 state.base_dma_addr = req->indirect_dma_addr;
1567 state.dma_len = idb_len;
1569 if (dev->use_fast_reg) {
1571 sg_init_one(idb_sg, req->indirect_desc, idb_len);
1572 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1573 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1574 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1576 ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1579 WARN_ON_ONCE(ret < 1);
1580 } else if (dev->use_fmr) {
1581 state.pages = idb_pages;
1582 state.pages[0] = (req->indirect_dma_addr &
1585 ret = srp_map_finish_fmr(&state, ch);
1592 *idb_rkey = idb_desc.key;
1597 static void srp_check_mapping(struct srp_map_state *state,
1598 struct srp_rdma_ch *ch, struct srp_request *req,
1599 struct scatterlist *scat, int count)
1601 struct srp_device *dev = ch->target->srp_host->srp_dev;
1602 struct srp_fr_desc **pfr;
1603 u64 desc_len = 0, mr_len = 0;
1606 for (i = 0; i < state->ndesc; i++)
1607 desc_len += be32_to_cpu(req->indirect_desc[i].len);
1608 if (dev->use_fast_reg)
1609 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1610 mr_len += (*pfr)->mr->length;
1611 else if (dev->use_fmr)
1612 for (i = 0; i < state->nmdesc; i++)
1613 mr_len += be32_to_cpu(req->indirect_desc[i].len);
1614 if (desc_len != scsi_bufflen(req->scmnd) ||
1615 mr_len > scsi_bufflen(req->scmnd))
1616 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1617 scsi_bufflen(req->scmnd), desc_len, mr_len,
1618 state->ndesc, state->nmdesc);
1622 * srp_map_data() - map SCSI data buffer onto an SRP request
1623 * @scmnd: SCSI command to map
1624 * @ch: SRP RDMA channel
1627 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1630 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1631 struct srp_request *req)
1633 struct srp_target_port *target = ch->target;
1634 struct ib_pd *pd = target->pd;
1635 struct scatterlist *scat;
1636 struct srp_cmd *cmd = req->cmd->buf;
1637 int len, nents, count, ret;
1638 struct srp_device *dev;
1639 struct ib_device *ibdev;
1640 struct srp_map_state state;
1641 struct srp_indirect_buf *indirect_hdr;
1642 u32 idb_len, table_len;
1646 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1647 return sizeof (struct srp_cmd);
1649 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1650 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1651 shost_printk(KERN_WARNING, target->scsi_host,
1652 PFX "Unhandled data direction %d\n",
1653 scmnd->sc_data_direction);
1657 nents = scsi_sg_count(scmnd);
1658 scat = scsi_sglist(scmnd);
1660 dev = target->srp_host->srp_dev;
1663 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1664 if (unlikely(count == 0))
1667 fmt = SRP_DATA_DESC_DIRECT;
1668 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1670 if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
1672 * The midlayer only generated a single gather/scatter
1673 * entry, or DMA mapping coalesced everything to a
1674 * single entry. So a direct descriptor along with
1675 * the DMA MR suffices.
1677 struct srp_direct_buf *buf = (void *) cmd->add_data;
1679 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1680 buf->key = cpu_to_be32(pd->unsafe_global_rkey);
1681 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1688 * We have more than one scatter/gather entry, so build our indirect
1689 * descriptor table, trying to merge as many entries as we can.
1691 indirect_hdr = (void *) cmd->add_data;
1693 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1694 target->indirect_size, DMA_TO_DEVICE);
1696 memset(&state, 0, sizeof(state));
1697 state.desc = req->indirect_desc;
1698 if (dev->use_fast_reg)
1699 ret = srp_map_sg_fr(&state, ch, req, scat, count);
1700 else if (dev->use_fmr)
1701 ret = srp_map_sg_fmr(&state, ch, req, scat, count);
1703 ret = srp_map_sg_dma(&state, ch, req, scat, count);
1704 req->nmdesc = state.nmdesc;
1709 DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1710 "Memory mapping consistency check");
1711 if (DYNAMIC_DEBUG_BRANCH(ddm))
1712 srp_check_mapping(&state, ch, req, scat, count);
1715 /* We've mapped the request, now pull as much of the indirect
1716 * descriptor table as we can into the command buffer. If this
1717 * target is not using an external indirect table, we are
1718 * guaranteed to fit into the command, as the SCSI layer won't
1719 * give us more S/G entries than we allow.
1721 if (state.ndesc == 1) {
1723 * Memory registration collapsed the sg-list into one entry,
1724 * so use a direct descriptor.
1726 struct srp_direct_buf *buf = (void *) cmd->add_data;
1728 *buf = req->indirect_desc[0];
1732 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1733 !target->allow_ext_sg)) {
1734 shost_printk(KERN_ERR, target->scsi_host,
1735 "Could not fit S/G list into SRP_CMD\n");
1740 count = min(state.ndesc, target->cmd_sg_cnt);
1741 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1742 idb_len = sizeof(struct srp_indirect_buf) + table_len;
1744 fmt = SRP_DATA_DESC_INDIRECT;
1745 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1746 len += count * sizeof (struct srp_direct_buf);
1748 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1749 count * sizeof (struct srp_direct_buf));
1751 if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
1752 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1753 idb_len, &idb_rkey);
1758 idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
1761 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1762 indirect_hdr->table_desc.key = idb_rkey;
1763 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1764 indirect_hdr->len = cpu_to_be32(state.total_len);
1766 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1767 cmd->data_out_desc_cnt = count;
1769 cmd->data_in_desc_cnt = count;
1771 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1775 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1776 cmd->buf_fmt = fmt << 4;
1783 srp_unmap_data(scmnd, ch, req);
1784 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1790 * Return an IU and possible credit to the free pool
1792 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1793 enum srp_iu_type iu_type)
1795 unsigned long flags;
1797 spin_lock_irqsave(&ch->lock, flags);
1798 list_add(&iu->list, &ch->free_tx);
1799 if (iu_type != SRP_IU_RSP)
1801 spin_unlock_irqrestore(&ch->lock, flags);
1805 * Must be called with ch->lock held to protect req_lim and free_tx.
1806 * If IU is not sent, it must be returned using srp_put_tx_iu().
1809 * An upper limit for the number of allocated information units for each
1811 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1812 * more than Scsi_Host.can_queue requests.
1813 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1814 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1815 * one unanswered SRP request to an initiator.
1817 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1818 enum srp_iu_type iu_type)
1820 struct srp_target_port *target = ch->target;
1821 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1824 lockdep_assert_held(&ch->lock);
1826 ib_process_cq_direct(ch->send_cq, -1);
1828 if (list_empty(&ch->free_tx))
1831 /* Initiator responses to target requests do not consume credits */
1832 if (iu_type != SRP_IU_RSP) {
1833 if (ch->req_lim <= rsv) {
1834 ++target->zero_req_lim;
1841 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1842 list_del(&iu->list);
1847 * Note: if this function is called from inside ib_drain_sq() then it will
1848 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1849 * with status IB_WC_SUCCESS then that's a bug.
1851 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1853 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1854 struct srp_rdma_ch *ch = cq->cq_context;
1856 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1857 srp_handle_qp_err(cq, wc, "SEND");
1861 lockdep_assert_held(&ch->lock);
1863 list_add(&iu->list, &ch->free_tx);
1866 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1868 struct srp_target_port *target = ch->target;
1870 struct ib_send_wr wr, *bad_wr;
1872 list.addr = iu->dma;
1874 list.lkey = target->lkey;
1876 iu->cqe.done = srp_send_done;
1879 wr.wr_cqe = &iu->cqe;
1882 wr.opcode = IB_WR_SEND;
1883 wr.send_flags = IB_SEND_SIGNALED;
1885 return ib_post_send(ch->qp, &wr, &bad_wr);
1888 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1890 struct srp_target_port *target = ch->target;
1891 struct ib_recv_wr wr, *bad_wr;
1894 list.addr = iu->dma;
1895 list.length = iu->size;
1896 list.lkey = target->lkey;
1898 iu->cqe.done = srp_recv_done;
1901 wr.wr_cqe = &iu->cqe;
1905 return ib_post_recv(ch->qp, &wr, &bad_wr);
1908 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1910 struct srp_target_port *target = ch->target;
1911 struct srp_request *req;
1912 struct scsi_cmnd *scmnd;
1913 unsigned long flags;
1915 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1916 spin_lock_irqsave(&ch->lock, flags);
1917 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1918 if (rsp->tag == ch->tsk_mgmt_tag) {
1919 ch->tsk_mgmt_status = -1;
1920 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1921 ch->tsk_mgmt_status = rsp->data[3];
1922 complete(&ch->tsk_mgmt_done);
1924 shost_printk(KERN_ERR, target->scsi_host,
1925 "Received tsk mgmt response too late for tag %#llx\n",
1928 spin_unlock_irqrestore(&ch->lock, flags);
1930 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1931 if (scmnd && scmnd->host_scribble) {
1932 req = (void *)scmnd->host_scribble;
1933 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1938 shost_printk(KERN_ERR, target->scsi_host,
1939 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1940 rsp->tag, ch - target->ch, ch->qp->qp_num);
1942 spin_lock_irqsave(&ch->lock, flags);
1943 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1944 spin_unlock_irqrestore(&ch->lock, flags);
1948 scmnd->result = rsp->status;
1950 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1951 memcpy(scmnd->sense_buffer, rsp->data +
1952 be32_to_cpu(rsp->resp_data_len),
1953 min_t(int, be32_to_cpu(rsp->sense_data_len),
1954 SCSI_SENSE_BUFFERSIZE));
1957 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1958 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1959 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1960 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1961 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1962 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1963 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1964 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1966 srp_free_req(ch, req, scmnd,
1967 be32_to_cpu(rsp->req_lim_delta));
1969 scmnd->host_scribble = NULL;
1970 scmnd->scsi_done(scmnd);
1974 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1977 struct srp_target_port *target = ch->target;
1978 struct ib_device *dev = target->srp_host->srp_dev->dev;
1979 unsigned long flags;
1983 spin_lock_irqsave(&ch->lock, flags);
1984 ch->req_lim += req_delta;
1985 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1986 spin_unlock_irqrestore(&ch->lock, flags);
1989 shost_printk(KERN_ERR, target->scsi_host, PFX
1990 "no IU available to send response\n");
1994 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1995 memcpy(iu->buf, rsp, len);
1996 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1998 err = srp_post_send(ch, iu, len);
2000 shost_printk(KERN_ERR, target->scsi_host, PFX
2001 "unable to post response: %d\n", err);
2002 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2008 static void srp_process_cred_req(struct srp_rdma_ch *ch,
2009 struct srp_cred_req *req)
2011 struct srp_cred_rsp rsp = {
2012 .opcode = SRP_CRED_RSP,
2015 s32 delta = be32_to_cpu(req->req_lim_delta);
2017 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2018 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2019 "problems processing SRP_CRED_REQ\n");
2022 static void srp_process_aer_req(struct srp_rdma_ch *ch,
2023 struct srp_aer_req *req)
2025 struct srp_target_port *target = ch->target;
2026 struct srp_aer_rsp rsp = {
2027 .opcode = SRP_AER_RSP,
2030 s32 delta = be32_to_cpu(req->req_lim_delta);
2032 shost_printk(KERN_ERR, target->scsi_host, PFX
2033 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2035 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2036 shost_printk(KERN_ERR, target->scsi_host, PFX
2037 "problems processing SRP_AER_REQ\n");
2040 static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2042 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2043 struct srp_rdma_ch *ch = cq->cq_context;
2044 struct srp_target_port *target = ch->target;
2045 struct ib_device *dev = target->srp_host->srp_dev->dev;
2049 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2050 srp_handle_qp_err(cq, wc, "RECV");
2054 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2057 opcode = *(u8 *) iu->buf;
2060 shost_printk(KERN_ERR, target->scsi_host,
2061 PFX "recv completion, opcode 0x%02x\n", opcode);
2062 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2063 iu->buf, wc->byte_len, true);
2068 srp_process_rsp(ch, iu->buf);
2072 srp_process_cred_req(ch, iu->buf);
2076 srp_process_aer_req(ch, iu->buf);
2080 /* XXX Handle target logout */
2081 shost_printk(KERN_WARNING, target->scsi_host,
2082 PFX "Got target logout request\n");
2086 shost_printk(KERN_WARNING, target->scsi_host,
2087 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2091 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2094 res = srp_post_recv(ch, iu);
2096 shost_printk(KERN_ERR, target->scsi_host,
2097 PFX "Recv failed with error code %d\n", res);
2101 * srp_tl_err_work() - handle a transport layer error
2102 * @work: Work structure embedded in an SRP target port.
2104 * Note: This function may get invoked before the rport has been created,
2105 * hence the target->rport test.
2107 static void srp_tl_err_work(struct work_struct *work)
2109 struct srp_target_port *target;
2111 target = container_of(work, struct srp_target_port, tl_err_work);
2113 srp_start_tl_fail_timers(target->rport);
2116 static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2119 struct srp_rdma_ch *ch = cq->cq_context;
2120 struct srp_target_port *target = ch->target;
2122 if (ch->connected && !target->qp_in_error) {
2123 shost_printk(KERN_ERR, target->scsi_host,
2124 PFX "failed %s status %s (%d) for CQE %p\n",
2125 opname, ib_wc_status_msg(wc->status), wc->status,
2127 queue_work(system_long_wq, &target->tl_err_work);
2129 target->qp_in_error = true;
2132 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2134 struct srp_target_port *target = host_to_target(shost);
2135 struct srp_rport *rport = target->rport;
2136 struct srp_rdma_ch *ch;
2137 struct srp_request *req;
2139 struct srp_cmd *cmd;
2140 struct ib_device *dev;
2141 unsigned long flags;
2145 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
2148 * The SCSI EH thread is the only context from which srp_queuecommand()
2149 * can get invoked for blocked devices (SDEV_BLOCK /
2150 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
2151 * locking the rport mutex if invoked from inside the SCSI EH.
2154 mutex_lock(&rport->mutex);
2156 scmnd->result = srp_chkready(target->rport);
2157 if (unlikely(scmnd->result))
2160 WARN_ON_ONCE(scmnd->request->tag < 0);
2161 tag = blk_mq_unique_tag(scmnd->request);
2162 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2163 idx = blk_mq_unique_tag_to_tag(tag);
2164 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2165 dev_name(&shost->shost_gendev), tag, idx,
2166 target->req_ring_size);
2168 spin_lock_irqsave(&ch->lock, flags);
2169 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2170 spin_unlock_irqrestore(&ch->lock, flags);
2175 req = &ch->req_ring[idx];
2176 dev = target->srp_host->srp_dev->dev;
2177 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2180 scmnd->host_scribble = (void *) req;
2183 memset(cmd, 0, sizeof *cmd);
2185 cmd->opcode = SRP_CMD;
2186 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2188 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2193 len = srp_map_data(scmnd, ch, req);
2195 shost_printk(KERN_ERR, target->scsi_host,
2196 PFX "Failed to map data (%d)\n", len);
2198 * If we ran out of memory descriptors (-ENOMEM) because an
2199 * application is queuing many requests with more than
2200 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2201 * to reduce queue depth temporarily.
2203 scmnd->result = len == -ENOMEM ?
2204 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2208 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2211 if (srp_post_send(ch, iu, len)) {
2212 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2213 scmnd->result = DID_ERROR << 16;
2221 mutex_unlock(&rport->mutex);
2226 srp_unmap_data(scmnd, ch, req);
2229 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2232 * Avoid that the loops that iterate over the request ring can
2233 * encounter a dangling SCSI command pointer.
2238 if (scmnd->result) {
2239 scmnd->scsi_done(scmnd);
2242 ret = SCSI_MLQUEUE_HOST_BUSY;
2249 * Note: the resources allocated in this function are freed in
2252 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2254 struct srp_target_port *target = ch->target;
2257 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2261 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2266 for (i = 0; i < target->queue_size; ++i) {
2267 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2269 GFP_KERNEL, DMA_FROM_DEVICE);
2270 if (!ch->rx_ring[i])
2274 for (i = 0; i < target->queue_size; ++i) {
2275 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2277 GFP_KERNEL, DMA_TO_DEVICE);
2278 if (!ch->tx_ring[i])
2281 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2287 for (i = 0; i < target->queue_size; ++i) {
2288 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2289 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2302 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2304 uint64_t T_tr_ns, max_compl_time_ms;
2305 uint32_t rq_tmo_jiffies;
2308 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2309 * table 91), both the QP timeout and the retry count have to be set
2310 * for RC QP's during the RTR to RTS transition.
2312 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2313 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2316 * Set target->rq_tmo_jiffies to one second more than the largest time
2317 * it can take before an error completion is generated. See also
2318 * C9-140..142 in the IBTA spec for more information about how to
2319 * convert the QP Local ACK Timeout value to nanoseconds.
2321 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2322 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2323 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2324 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2326 return rq_tmo_jiffies;
2329 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2330 const struct srp_login_rsp *lrsp,
2331 struct srp_rdma_ch *ch)
2333 struct srp_target_port *target = ch->target;
2334 struct ib_qp_attr *qp_attr = NULL;
2339 if (lrsp->opcode == SRP_LOGIN_RSP) {
2340 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2341 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2344 * Reserve credits for task management so we don't
2345 * bounce requests back to the SCSI mid-layer.
2347 target->scsi_host->can_queue
2348 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2349 target->scsi_host->can_queue);
2350 target->scsi_host->cmd_per_lun
2351 = min_t(int, target->scsi_host->can_queue,
2352 target->scsi_host->cmd_per_lun);
2354 shost_printk(KERN_WARNING, target->scsi_host,
2355 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2361 ret = srp_alloc_iu_bufs(ch);
2367 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2371 qp_attr->qp_state = IB_QPS_RTR;
2372 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2376 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2380 for (i = 0; i < target->queue_size; i++) {
2381 struct srp_iu *iu = ch->rx_ring[i];
2383 ret = srp_post_recv(ch, iu);
2388 qp_attr->qp_state = IB_QPS_RTS;
2389 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2393 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2395 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2399 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2408 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2409 struct ib_cm_event *event,
2410 struct srp_rdma_ch *ch)
2412 struct srp_target_port *target = ch->target;
2413 struct Scsi_Host *shost = target->scsi_host;
2414 struct ib_class_port_info *cpi;
2417 switch (event->param.rej_rcvd.reason) {
2418 case IB_CM_REJ_PORT_CM_REDIRECT:
2419 cpi = event->param.rej_rcvd.ari;
2420 sa_path_set_dlid(&ch->path, htonl(ntohs(cpi->redirect_lid)));
2421 ch->path.pkey = cpi->redirect_pkey;
2422 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2423 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2425 ch->status = sa_path_get_dlid(&ch->path) ?
2426 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2429 case IB_CM_REJ_PORT_REDIRECT:
2430 if (srp_target_is_topspin(target)) {
2432 * Topspin/Cisco SRP gateways incorrectly send
2433 * reject reason code 25 when they mean 24
2434 * (port redirect).
2436 memcpy(ch->path.dgid.raw,
2437 event->param.rej_rcvd.ari, 16);
2439 shost_printk(KERN_DEBUG, shost,
2440 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2441 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2442 be64_to_cpu(ch->path.dgid.global.interface_id));
2444 ch->status = SRP_PORT_REDIRECT;
2446 shost_printk(KERN_WARNING, shost,
2447 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2448 ch->status = -ECONNRESET;
2452 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2453 shost_printk(KERN_WARNING, shost,
2454 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2455 ch->status = -ECONNRESET;
2458 case IB_CM_REJ_CONSUMER_DEFINED:
2459 opcode = *(u8 *) event->private_data;
2460 if (opcode == SRP_LOGIN_REJ) {
2461 struct srp_login_rej *rej = event->private_data;
2462 u32 reason = be32_to_cpu(rej->reason);
2464 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2465 shost_printk(KERN_WARNING, shost,
2466 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2468 shost_printk(KERN_WARNING, shost, PFX
2469 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2470 target->sgid.raw,
2471 target->orig_dgid.raw, reason);
2473 shost_printk(KERN_WARNING, shost,
2474 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2475 " opcode 0x%02x\n", opcode);
2476 ch->status = -ECONNRESET;
2479 case IB_CM_REJ_STALE_CONN:
2480 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2481 ch->status = SRP_STALE_CONN;
2485 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2486 event->param.rej_rcvd.reason);
2487 ch->status = -ECONNRESET;
2491 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2493 struct srp_rdma_ch *ch = cm_id->context;
2494 struct srp_target_port *target = ch->target;
2497 switch (event->event) {
2498 case IB_CM_REQ_ERROR:
2499 shost_printk(KERN_DEBUG, target->scsi_host,
2500 PFX "Sending CM REQ failed\n");
2502 ch->status = -ECONNRESET;
2505 case IB_CM_REP_RECEIVED:
2507 srp_cm_rep_handler(cm_id, event->private_data, ch);
2510 case IB_CM_REJ_RECEIVED:
2511 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2514 srp_cm_rej_handler(cm_id, event, ch);
2517 case IB_CM_DREQ_RECEIVED:
2518 shost_printk(KERN_WARNING, target->scsi_host,
2519 PFX "DREQ received - connection closed\n");
2520 ch->connected = false;
2521 if (ib_send_cm_drep(cm_id, NULL, 0))
2522 shost_printk(KERN_ERR, target->scsi_host,
2523 PFX "Sending CM DREP failed\n");
2524 queue_work(system_long_wq, &target->tl_err_work);
2527 case IB_CM_TIMEWAIT_EXIT:
2528 shost_printk(KERN_ERR, target->scsi_host,
2529 PFX "connection closed\n");
2535 case IB_CM_MRA_RECEIVED:
2536 case IB_CM_DREQ_ERROR:
2537 case IB_CM_DREP_RECEIVED:
2541 shost_printk(KERN_WARNING, target->scsi_host,
2542 PFX "Unhandled CM event %d\n", event->event);
2547 complete(&ch->done);
2553 * srp_change_queue_depth - setting device queue depth
2554 * @sdev: scsi device struct
2555 * @qdepth: requested queue depth
2557 * Returns queue depth.
2560 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2562 if (!sdev->tagged_supported)
2564 return scsi_change_queue_depth(sdev, qdepth);
2567 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2568 u8 func, u8 *status)
2570 struct srp_target_port *target = ch->target;
2571 struct srp_rport *rport = target->rport;
2572 struct ib_device *dev = target->srp_host->srp_dev->dev;
2574 struct srp_tsk_mgmt *tsk_mgmt;
2577 if (!ch->connected || target->qp_in_error)
2581 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2582 * invoked while a task management function is being sent.
2584 mutex_lock(&rport->mutex);
2585 spin_lock_irq(&ch->lock);
2586 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2587 spin_unlock_irq(&ch->lock);
2590 mutex_unlock(&rport->mutex);
2595 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2596 DMA_TO_DEVICE);
2598 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2600 tsk_mgmt->opcode = SRP_TSK_MGMT;
2601 int_to_scsilun(lun, &tsk_mgmt->lun);
2602 tsk_mgmt->tsk_mgmt_func = func;
2603 tsk_mgmt->task_tag = req_tag;
2605 spin_lock_irq(&ch->lock);
2606 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2607 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2608 spin_unlock_irq(&ch->lock);
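/*
 * Added note: OR-ing in SRP_TAG_TSK_MGMT (the driver's reserved high tag
 * bit, as far as this note assumes) keeps task-management tags disjoint
 * from the block-layer tags used for regular commands, so their responses
 * can be told apart when they come back.
 */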
2610 init_completion(&ch->tsk_mgmt_done);
2612 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2613 DMA_TO_DEVICE);
2614 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2615 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2616 mutex_unlock(&rport->mutex);
2620 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2621 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2622 if (res > 0 && status)
2623 *status = ch->tsk_mgmt_status;
2624 mutex_unlock(&rport->mutex);
2626 WARN_ON_ONCE(res < 0);
2628 return res > 0 ? 0 : -1;
2631 static int srp_abort(struct scsi_cmnd *scmnd)
2633 struct srp_target_port *target = host_to_target(scmnd->device->host);
2634 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2637 struct srp_rdma_ch *ch;
2640 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2644 tag = blk_mq_unique_tag(scmnd->request);
2645 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2646 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2648 ch = &target->ch[ch_idx];
2649 if (!srp_claim_req(ch, req, NULL, scmnd))
2651 shost_printk(KERN_ERR, target->scsi_host,
2652 "Sending SRP abort for tag %#x\n", tag);
2653 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2654 SRP_TSK_ABORT_TASK, NULL) == 0)
2656 else if (target->rport->state == SRP_RPORT_LOST)
2660 if (ret == SUCCESS) {
2661 srp_free_req(ch, req, scmnd, 0);
2662 scmnd->result = DID_ABORT << 16;
2663 scmnd->scsi_done(scmnd);
2669 static int srp_reset_device(struct scsi_cmnd *scmnd)
2671 struct srp_target_port *target = host_to_target(scmnd->device->host);
2672 struct srp_rdma_ch *ch;
2675 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2677 ch = &target->ch[0];
2678 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2679 SRP_TSK_LUN_RESET, &status))
2687 static int srp_reset_host(struct scsi_cmnd *scmnd)
2689 struct srp_target_port *target = host_to_target(scmnd->device->host);
2691 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2693 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2696 static int srp_slave_alloc(struct scsi_device *sdev)
2698 struct Scsi_Host *shost = sdev->host;
2699 struct srp_target_port *target = host_to_target(shost);
2700 struct srp_device *srp_dev = target->srp_host->srp_dev;
2703 blk_queue_virt_boundary(sdev->request_queue,
2704 ~srp_dev->mr_page_mask);
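/*
 * Added note (interpretation, not from the original source): the inverted
 * mr_page_mask passed to blk_queue_virt_boundary() above is the offset
 * mask within one registration page, so the block layer only builds S/G
 * lists in which at most the first and last element are partial pages,
 * matching what FR/FMR registration can describe.
 */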
2709 static int srp_slave_configure(struct scsi_device *sdev)
2711 struct Scsi_Host *shost = sdev->host;
2712 struct srp_target_port *target = host_to_target(shost);
2713 struct request_queue *q = sdev->request_queue;
2714 unsigned long timeout;
2716 if (sdev->type == TYPE_DISK) {
2717 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2718 blk_queue_rq_timeout(q, timeout);
2724 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2727 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2729 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2732 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2735 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2737 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2740 static ssize_t show_service_id(struct device *dev,
2741 struct device_attribute *attr, char *buf)
2743 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2745 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2748 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2751 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2753 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2756 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2759 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2761 return sprintf(buf, "%pI6\n", target->sgid.raw);
2764 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2767 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2768 struct srp_rdma_ch *ch = &target->ch[0];
2770 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2773 static ssize_t show_orig_dgid(struct device *dev,
2774 struct device_attribute *attr, char *buf)
2776 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2778 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2781 static ssize_t show_req_lim(struct device *dev,
2782 struct device_attribute *attr, char *buf)
2784 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2785 struct srp_rdma_ch *ch;
2786 int i, req_lim = INT_MAX;
2788 for (i = 0; i < target->ch_count; i++) {
2789 ch = &target->ch[i];
2790 req_lim = min(req_lim, ch->req_lim);
2792 return sprintf(buf, "%d\n", req_lim);
2795 static ssize_t show_zero_req_lim(struct device *dev,
2796 struct device_attribute *attr, char *buf)
2798 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2800 return sprintf(buf, "%d\n", target->zero_req_lim);
2803 static ssize_t show_local_ib_port(struct device *dev,
2804 struct device_attribute *attr, char *buf)
2806 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2808 return sprintf(buf, "%d\n", target->srp_host->port);
2811 static ssize_t show_local_ib_device(struct device *dev,
2812 struct device_attribute *attr, char *buf)
2814 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2816 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2819 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2822 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2824 return sprintf(buf, "%d\n", target->ch_count);
2827 static ssize_t show_comp_vector(struct device *dev,
2828 struct device_attribute *attr, char *buf)
2830 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2832 return sprintf(buf, "%d\n", target->comp_vector);
2835 static ssize_t show_tl_retry_count(struct device *dev,
2836 struct device_attribute *attr, char *buf)
2838 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2840 return sprintf(buf, "%d\n", target->tl_retry_count);
2843 static ssize_t show_cmd_sg_entries(struct device *dev,
2844 struct device_attribute *attr, char *buf)
2846 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2848 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2851 static ssize_t show_allow_ext_sg(struct device *dev,
2852 struct device_attribute *attr, char *buf)
2854 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2856 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2859 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2860 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2861 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2862 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2863 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2864 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2865 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2866 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2867 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2868 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2869 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2870 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2871 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2872 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2873 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2874 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2876 static struct device_attribute *srp_host_attrs[] = {
2879 &dev_attr_service_id,
2883 &dev_attr_orig_dgid,
2885 &dev_attr_zero_req_lim,
2886 &dev_attr_local_ib_port,
2887 &dev_attr_local_ib_device,
2889 &dev_attr_comp_vector,
2890 &dev_attr_tl_retry_count,
2891 &dev_attr_cmd_sg_entries,
2892 &dev_attr_allow_ext_sg,
2896 static struct scsi_host_template srp_template = {
2897 .module = THIS_MODULE,
2898 .name = "InfiniBand SRP initiator",
2899 .proc_name = DRV_NAME,
2900 .slave_alloc = srp_slave_alloc,
2901 .slave_configure = srp_slave_configure,
2902 .info = srp_target_info,
2903 .queuecommand = srp_queuecommand,
2904 .change_queue_depth = srp_change_queue_depth,
2905 .eh_timed_out = srp_timed_out,
2906 .eh_abort_handler = srp_abort,
2907 .eh_device_reset_handler = srp_reset_device,
2908 .eh_host_reset_handler = srp_reset_host,
2909 .skip_settle_delay = true,
2910 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2911 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2913 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2914 .use_clustering = ENABLE_CLUSTERING,
2915 .shost_attrs = srp_host_attrs,
2916 .track_queue_depth = 1,
2919 static int srp_sdev_count(struct Scsi_Host *host)
2921 struct scsi_device *sdev;
2924 shost_for_each_device(sdev, host)
2932 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2933 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2934 * removal has been scheduled.
2935 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2937 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2939 struct srp_rport_identifiers ids;
2940 struct srp_rport *rport;
2942 target->state = SRP_TARGET_SCANNING;
2943 sprintf(target->target_name, "SRP.T10:%016llX",
2944 be64_to_cpu(target->id_ext));
2946 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
2949 memcpy(ids.port_id, &target->id_ext, 8);
2950 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2951 ids.roles = SRP_RPORT_ROLE_TARGET;
2952 rport = srp_rport_add(target->scsi_host, &ids);
2953 if (IS_ERR(rport)) {
2954 scsi_remove_host(target->scsi_host);
2955 return PTR_ERR(rport);
2958 rport->lld_data = target;
2959 target->rport = rport;
2961 spin_lock(&host->target_lock);
2962 list_add_tail(&target->list, &host->target_list);
2963 spin_unlock(&host->target_lock);
2965 scsi_scan_target(&target->scsi_host->shost_gendev,
2966 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2968 if (srp_connected_ch(target) < target->ch_count ||
2969 target->qp_in_error) {
2970 shost_printk(KERN_INFO, target->scsi_host,
2971 PFX "SCSI scan failed - removing SCSI host\n");
2972 srp_queue_remove_work(target);
2976 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2977 dev_name(&target->scsi_host->shost_gendev),
2978 srp_sdev_count(target->scsi_host));
2980 spin_lock_irq(&target->lock);
2981 if (target->state == SRP_TARGET_SCANNING)
2982 target->state = SRP_TARGET_LIVE;
2983 spin_unlock_irq(&target->lock);
2989 static void srp_release_dev(struct device *dev)
2991 struct srp_host *host =
2992 container_of(dev, struct srp_host, dev);
2994 complete(&host->released);
2997 static struct class srp_class = {
2998 .name = "infiniband_srp",
2999 .dev_release = srp_release_dev
3003 * srp_conn_unique() - check whether the connection to a target is unique
3004 * @host:   SRP host.
3005 * @target: SRP target port.
3007 static bool srp_conn_unique(struct srp_host *host,
3008 struct srp_target_port *target)
3010 struct srp_target_port *t;
3013 if (target->state == SRP_TARGET_REMOVED)
3018 spin_lock(&host->target_lock);
3019 list_for_each_entry(t, &host->target_list, list) {
3021 target->id_ext == t->id_ext &&
3022 target->ioc_guid == t->ioc_guid &&
3023 target->initiator_ext == t->initiator_ext) {
3028 spin_unlock(&host->target_lock);
3035 * Target ports are added by writing
3037 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3038 * pkey=<P_Key>,service_id=<service ID>
3040 * to the add_target sysfs attribute.
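 *
 * Example (all values below are made up for illustration only):
 *
 *   echo id_ext=200500a0b81146a1,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 > /sys/class/infiniband_srp/srp-<hca>-<port>/add_target
 *
 * The dgid value must be exactly 32 hex characters (see the parsing code
 * below); the sysfs path follows the "srp-%s-%d" naming used by
 * srp_add_port().
 */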
3044 SRP_OPT_ID_EXT = 1 << 0,
3045 SRP_OPT_IOC_GUID = 1 << 1,
3046 SRP_OPT_DGID = 1 << 2,
3047 SRP_OPT_PKEY = 1 << 3,
3048 SRP_OPT_SERVICE_ID = 1 << 4,
3049 SRP_OPT_MAX_SECT = 1 << 5,
3050 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3051 SRP_OPT_IO_CLASS = 1 << 7,
3052 SRP_OPT_INITIATOR_EXT = 1 << 8,
3053 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3054 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3055 SRP_OPT_SG_TABLESIZE = 1 << 11,
3056 SRP_OPT_COMP_VECTOR = 1 << 12,
3057 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3058 SRP_OPT_QUEUE_SIZE = 1 << 14,
3059 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
3060 SRP_OPT_IOC_GUID |
3061 SRP_OPT_DGID |
3062 SRP_OPT_PKEY |
3063 SRP_OPT_SERVICE_ID),
3066 static const match_table_t srp_opt_tokens = {
3067 { SRP_OPT_ID_EXT, "id_ext=%s" },
3068 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3069 { SRP_OPT_DGID, "dgid=%s" },
3070 { SRP_OPT_PKEY, "pkey=%x" },
3071 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3072 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3073 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3074 { SRP_OPT_IO_CLASS, "io_class=%x" },
3075 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3076 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3077 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3078 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3079 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3080 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3081 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
3082 { SRP_OPT_ERR, NULL }
3085 static int srp_parse_options(const char *buf, struct srp_target_port *target)
3087 char *options, *sep_opt;
3090 substring_t args[MAX_OPT_ARGS];
3096 options = kstrdup(buf, GFP_KERNEL);
3101 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3105 token = match_token(p, srp_opt_tokens, args);
3109 case SRP_OPT_ID_EXT:
3110 p = match_strdup(args);
3115 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3119 case SRP_OPT_IOC_GUID:
3120 p = match_strdup(args);
3125 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3130 p = match_strdup(args);
3135 if (strlen(p) != 32) {
3136 pr_warn("bad dest GID parameter '%s'\n", p);
3141 for (i = 0; i < 16; ++i) {
3142 strlcpy(dgid, p + i * 2, sizeof(dgid));
3143 if (sscanf(dgid, "%hhx",
3144 &target->orig_dgid.raw[i]) < 1) {
3154 if (match_hex(args, &token)) {
3155 pr_warn("bad P_Key parameter '%s'\n", p);
3158 target->pkey = cpu_to_be16(token);
3161 case SRP_OPT_SERVICE_ID:
3162 p = match_strdup(args);
3167 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3171 case SRP_OPT_MAX_SECT:
3172 if (match_int(args, &token)) {
3173 pr_warn("bad max sect parameter '%s'\n", p);
3176 target->scsi_host->max_sectors = token;
3179 case SRP_OPT_QUEUE_SIZE:
3180 if (match_int(args, &token) || token < 1) {
3181 pr_warn("bad queue_size parameter '%s'\n", p);
3184 target->scsi_host->can_queue = token;
3185 target->queue_size = token + SRP_RSP_SQ_SIZE +
3186 SRP_TSK_MGMT_SQ_SIZE;
3187 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3188 target->scsi_host->cmd_per_lun = token;
3191 case SRP_OPT_MAX_CMD_PER_LUN:
3192 if (match_int(args, &token) || token < 1) {
3193 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3197 target->scsi_host->cmd_per_lun = token;
3200 case SRP_OPT_IO_CLASS:
3201 if (match_hex(args, &token)) {
3202 pr_warn("bad IO class parameter '%s'\n", p);
3205 if (token != SRP_REV10_IB_IO_CLASS &&
3206 token != SRP_REV16A_IB_IO_CLASS) {
3207 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3208 token, SRP_REV10_IB_IO_CLASS,
3209 SRP_REV16A_IB_IO_CLASS);
3212 target->io_class = token;
3215 case SRP_OPT_INITIATOR_EXT:
3216 p = match_strdup(args);
3221 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3225 case SRP_OPT_CMD_SG_ENTRIES:
3226 if (match_int(args, &token) || token < 1 || token > 255) {
3227 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3231 target->cmd_sg_cnt = token;
3234 case SRP_OPT_ALLOW_EXT_SG:
3235 if (match_int(args, &token)) {
3236 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3239 target->allow_ext_sg = !!token;
3242 case SRP_OPT_SG_TABLESIZE:
3243 if (match_int(args, &token) || token < 1 ||
3244 token > SG_MAX_SEGMENTS) {
3245 pr_warn("bad max sg_tablesize parameter '%s'\n",
3249 target->sg_tablesize = token;
3252 case SRP_OPT_COMP_VECTOR:
3253 if (match_int(args, &token) || token < 0) {
3254 pr_warn("bad comp_vector parameter '%s'\n", p);
3257 target->comp_vector = token;
3260 case SRP_OPT_TL_RETRY_COUNT:
3261 if (match_int(args, &token) || token < 2 || token > 7) {
3262 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3266 target->tl_retry_count = token;
3270 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3276 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3279 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3280 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3281 !(srp_opt_tokens[i].token & opt_mask))
3282 pr_warn("target creation request is missing parameter '%s'\n",
3283 srp_opt_tokens[i].pattern);
3285 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3286 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3287 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3288 target->scsi_host->cmd_per_lun,
3289 target->scsi_host->can_queue);
3296 static ssize_t srp_create_target(struct device *dev,
3297 struct device_attribute *attr,
3298 const char *buf, size_t count)
3300 struct srp_host *host =
3301 container_of(dev, struct srp_host, dev);
3302 struct Scsi_Host *target_host;
3303 struct srp_target_port *target;
3304 struct srp_rdma_ch *ch;
3305 struct srp_device *srp_dev = host->srp_dev;
3306 struct ib_device *ibdev = srp_dev->dev;
3307 int ret, node_idx, node, cpu, i;
3308 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3309 bool multich = false;
3311 target_host = scsi_host_alloc(&srp_template,
3312 sizeof (struct srp_target_port));
3316 target_host->transportt = ib_srp_transport_template;
3317 target_host->max_channel = 0;
3318 target_host->max_id = 1;
3319 target_host->max_lun = -1LL;
3320 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3322 target = host_to_target(target_host);
3324 target->io_class = SRP_REV16A_IB_IO_CLASS;
3325 target->scsi_host = target_host;
3326 target->srp_host = host;
3327 target->pd = host->srp_dev->pd;
3328 target->lkey = host->srp_dev->pd->local_dma_lkey;
3329 target->cmd_sg_cnt = cmd_sg_entries;
3330 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3331 target->allow_ext_sg = allow_ext_sg;
3332 target->tl_retry_count = 7;
3333 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3336 * Prevent the SCSI host from being removed by srp_remove_target()
3337 * before this function returns.
3339 scsi_host_get(target->scsi_host);
3341 ret = mutex_lock_interruptible(&host->add_target_mutex);
3345 ret = srp_parse_options(buf, target);
3349 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3351 if (!srp_conn_unique(target->srp_host, target)) {
3352 shost_printk(KERN_INFO, target->scsi_host,
3353 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3354 be64_to_cpu(target->id_ext),
3355 be64_to_cpu(target->ioc_guid),
3356 be64_to_cpu(target->initiator_ext));
3361 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3362 target->cmd_sg_cnt < target->sg_tablesize) {
3363 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3364 target->sg_tablesize = target->cmd_sg_cnt;
3367 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3369 * FR and FMR can only map one HCA page per entry. If the
3370 * start address is not aligned on a HCA page boundary two
3371 * entries will be used for the head and the tail although
3372 * these two entries combined contain at most one HCA page of
3373 * data. Hence the "+ 1" in the calculation below.
3375 * The indirect data buffer descriptor is contiguous so the
3376 * memory for that buffer will only be registered if
3377 * register_always is true. Hence add one to mr_per_cmd if
3378 * register_always has been set.
3380 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3381 (ilog2(srp_dev->mr_page_size) - 9);
3382 mr_per_cmd = register_always +
3383 (target->scsi_host->max_sectors + 1 +
3384 max_sectors_per_mr - 1) / max_sectors_per_mr;
3385 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3386 target->scsi_host->max_sectors,
3387 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3388 max_sectors_per_mr, mr_per_cmd);
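/*
 * Worked example (illustrative values, not from the original source): with
 * a 4 KiB mr_page_size, max_pages_per_mr = 256 and register_always = true,
 * max_sectors_per_mr = 256 << (12 - 9) = 2048 sectors (1 MiB); for
 * max_sectors = 1024 this gives mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048
 * = 2, i.e. one MR for the data and one for the indirect descriptor table.
 */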
3391 target_host->sg_tablesize = target->sg_tablesize;
3392 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3393 target->mr_per_cmd = mr_per_cmd;
3394 target->indirect_size = target->sg_tablesize *
3395 sizeof (struct srp_direct_buf);
3396 target->max_iu_len = sizeof (struct srp_cmd) +
3397 sizeof (struct srp_indirect_buf) +
3398 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
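/*
 * Rough size check (assuming the usual SRP wire sizes of 48 bytes for
 * struct srp_cmd, 20 for struct srp_indirect_buf and 16 for struct
 * srp_direct_buf): with the default cmd_sg_cnt of 12 this works out to
 * 48 + 20 + 12 * 16 = 260 bytes per request IU.
 */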
3400 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3401 INIT_WORK(&target->remove_work, srp_remove_work);
3402 spin_lock_init(&target->lock);
3403 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
3408 target->ch_count = max_t(unsigned, num_online_nodes(),
3409 min(ch_count ? :
3410 min(4 * num_online_nodes(),
3411 ibdev->num_comp_vectors),
3412 num_online_cpus()));
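/*
 * Example (illustrative values): on a machine with 2 online NUMA nodes,
 * 16 online CPUs and an HCA exposing 8 completion vectors, with the
 * ch_count module parameter left at its default, this yields
 * max(2, min(min(4 * 2, 8), 16)) = 8 RDMA channels; the loop below then
 * spreads them evenly, roughly 4 channels and 4 completion vectors per
 * node.
 */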
3413 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3419 for_each_online_node(node) {
3420 const int ch_start = (node_idx * target->ch_count /
3421 num_online_nodes());
3422 const int ch_end = ((node_idx + 1) * target->ch_count /
3423 num_online_nodes());
3424 const int cv_start = node_idx * ibdev->num_comp_vectors /
3425 num_online_nodes();
3426 const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3427 num_online_nodes();
3430 for_each_online_cpu(cpu) {
3431 if (cpu_to_node(cpu) != node)
3433 if (ch_start + cpu_idx >= ch_end)
3435 ch = &target->ch[ch_start + cpu_idx];
3436 ch->target = target;
3437 ch->comp_vector = cv_start == cv_end ? cv_start :
3438 cv_start + cpu_idx % (cv_end - cv_start);
3439 spin_lock_init(&ch->lock);
3440 INIT_LIST_HEAD(&ch->free_tx);
3441 ret = srp_new_cm_id(ch);
3443 goto err_disconnect;
3445 ret = srp_create_ch_ib(ch);
3447 goto err_disconnect;
3449 ret = srp_alloc_req_data(ch);
3451 goto err_disconnect;
3453 ret = srp_connect_ch(ch, multich);
3455 shost_printk(KERN_ERR, target->scsi_host,
3456 PFX "Connection %d/%d to %pI6 failed\n",
3459 ch->target->orig_dgid.raw);
3460 if (node_idx == 0 && cpu_idx == 0) {
3463 srp_free_ch_ib(target, ch);
3464 srp_free_req_data(target, ch);
3465 target->ch_count = ch - target->ch;
3477 target->scsi_host->nr_hw_queues = target->ch_count;
3479 ret = srp_add_target(host, target);
3481 goto err_disconnect;
3483 if (target->state != SRP_TARGET_REMOVED) {
3484 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3485 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3486 be64_to_cpu(target->id_ext),
3487 be64_to_cpu(target->ioc_guid),
3488 be16_to_cpu(target->pkey),
3489 be64_to_cpu(target->service_id),
3490 target->sgid.raw, target->orig_dgid.raw);
3496 mutex_unlock(&host->add_target_mutex);
3499 scsi_host_put(target->scsi_host);
3501 scsi_host_put(target->scsi_host);
3506 srp_disconnect_target(target);
3509 for (i = 0; i < target->ch_count; i++) {
3510 ch = &target->ch[i];
3511 srp_free_ch_ib(target, ch);
3512 srp_free_req_data(target, ch);
3519 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3521 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3524 struct srp_host *host = container_of(dev, struct srp_host, dev);
3526 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3529 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3531 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3534 struct srp_host *host = container_of(dev, struct srp_host, dev);
3536 return sprintf(buf, "%d\n", host->port);
3539 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3541 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3543 struct srp_host *host;
3545 host = kzalloc(sizeof *host, GFP_KERNEL);
3549 INIT_LIST_HEAD(&host->target_list);
3550 spin_lock_init(&host->target_lock);
3551 init_completion(&host->released);
3552 mutex_init(&host->add_target_mutex);
3553 host->srp_dev = device;
3556 host->dev.class = &srp_class;
3557 host->dev.parent = device->dev->dev.parent;
3558 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3560 if (device_register(&host->dev))
3562 if (device_create_file(&host->dev, &dev_attr_add_target))
3564 if (device_create_file(&host->dev, &dev_attr_ibdev))
3566 if (device_create_file(&host->dev, &dev_attr_port))
3572 device_unregister(&host->dev);
3580 static void srp_add_one(struct ib_device *device)
3582 struct srp_device *srp_dev;
3583 struct ib_device_attr *attr = &device->attrs;
3584 struct srp_host *host;
3585 int mr_page_shift, p;
3586 u64 max_pages_per_mr;
3587 unsigned int flags = 0;
3589 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3594 * Use the smallest page size supported by the HCA, down to a
3595 * minimum of 4096 bytes. We're unlikely to build large sglists
3596 * out of smaller entries.
3598 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
3599 srp_dev->mr_page_size = 1 << mr_page_shift;
3600 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3601 max_pages_per_mr = attr->max_mr_size;
3602 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3603 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3604 attr->max_mr_size, srp_dev->mr_page_size,
3605 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3606 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3607 max_pages_per_mr);
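/*
 * Example (illustrative values): an HCA whose page_size_cap has 4 KiB as
 * its smallest supported size gives mr_page_shift = max(12, 13 - 1) = 12,
 * i.e. 4096-byte registration pages; with max_mr_size = 4 GiB that allows
 * 1048576 pages per MR, which is then capped to SRP_MAX_PAGES_PER_MR.
 */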
3609 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3610 device->map_phys_fmr && device->unmap_fmr);
3611 srp_dev->has_fr = (attr->device_cap_flags &
3612 IB_DEVICE_MEM_MGT_EXTENSIONS);
3613 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
3614 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3615 } else if (!never_register &&
3616 attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
3617 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3618 (!srp_dev->has_fmr || prefer_fr));
3619 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3622 if (never_register || !register_always ||
3623 (!srp_dev->has_fmr && !srp_dev->has_fr))
3624 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3626 if (srp_dev->use_fast_reg) {
3627 srp_dev->max_pages_per_mr =
3628 min_t(u32, srp_dev->max_pages_per_mr,
3629 attr->max_fast_reg_page_list_len);
3631 srp_dev->mr_max_size = srp_dev->mr_page_size *
3632 srp_dev->max_pages_per_mr;
3633 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3634 device->name, mr_page_shift, attr->max_mr_size,
3635 attr->max_fast_reg_page_list_len,
3636 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3638 INIT_LIST_HEAD(&srp_dev->dev_list);
3640 srp_dev->dev = device;
3641 srp_dev->pd = ib_alloc_pd(device, flags);
3642 if (IS_ERR(srp_dev->pd))
3646 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3647 host = srp_add_port(srp_dev, p);
3649 list_add_tail(&host->list, &srp_dev->dev_list);
3652 ib_set_client_data(device, &srp_client, srp_dev);
3659 static void srp_remove_one(struct ib_device *device, void *client_data)
3661 struct srp_device *srp_dev;
3662 struct srp_host *host, *tmp_host;
3663 struct srp_target_port *target;
3665 srp_dev = client_data;
3669 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3670 device_unregister(&host->dev);
3672 * Wait for the sysfs entry to go away, so that no new
3673 * target ports can be created.
3675 wait_for_completion(&host->released);
3678 * Remove all target ports.
3680 spin_lock(&host->target_lock);
3681 list_for_each_entry(target, &host->target_list, list)
3682 srp_queue_remove_work(target);
3683 spin_unlock(&host->target_lock);
3686 * Wait for tl_err and target port removal tasks.
3688 flush_workqueue(system_long_wq);
3689 flush_workqueue(srp_remove_wq);
3694 ib_dealloc_pd(srp_dev->pd);
3699 static struct srp_function_template ib_srp_transport_functions = {
3700 .has_rport_state = true,
3701 .reset_timer_if_blocked = true,
3702 .reconnect_delay = &srp_reconnect_delay,
3703 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3704 .dev_loss_tmo = &srp_dev_loss_tmo,
3705 .reconnect = srp_rport_reconnect,
3706 .rport_delete = srp_rport_delete,
3707 .terminate_rport_io = srp_terminate_io,
3710 static int __init srp_init_module(void)
3714 if (srp_sg_tablesize) {
3715 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3716 if (!cmd_sg_entries)
3717 cmd_sg_entries = srp_sg_tablesize;
3720 if (!cmd_sg_entries)
3721 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3723 if (cmd_sg_entries > 255) {
3724 pr_warn("Clamping cmd_sg_entries to 255\n");
3725 cmd_sg_entries = 255;
3728 if (!indirect_sg_entries)
3729 indirect_sg_entries = cmd_sg_entries;
3730 else if (indirect_sg_entries < cmd_sg_entries) {
3731 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3733 indirect_sg_entries = cmd_sg_entries;
3736 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
3737 pr_warn("Clamping indirect_sg_entries to %u\n",
3739 indirect_sg_entries = SG_MAX_SEGMENTS;
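/*
 * Example of the parameter normalization above (illustrative values):
 * loading with cmd_sg_entries=300 clamps it to 255, and an unset
 * indirect_sg_entries is then defaulted to the same 255; a value larger
 * than SG_MAX_SEGMENTS is likewise clamped down to that limit.
 */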
3742 srp_remove_wq = create_workqueue("srp_remove");
3743 if (!srp_remove_wq) {
3749 ib_srp_transport_template =
3750 srp_attach_transport(&ib_srp_transport_functions);
3751 if (!ib_srp_transport_template)
3754 ret = class_register(&srp_class);
3756 pr_err("couldn't register class infiniband_srp\n");
3760 ib_sa_register_client(&srp_sa_client);
3762 ret = ib_register_client(&srp_client);
3764 pr_err("couldn't register IB client\n");
3772 ib_sa_unregister_client(&srp_sa_client);
3773 class_unregister(&srp_class);
3776 srp_release_transport(ib_srp_transport_template);
3779 destroy_workqueue(srp_remove_wq);
3783 static void __exit srp_cleanup_module(void)
3785 ib_unregister_client(&srp_client);
3786 ib_sa_unregister_client(&srp_sa_client);
3787 class_unregister(&srp_class);
3788 srp_release_transport(ib_srp_transport_template);
3789 destroy_workqueue(srp_remove_wq);
3792 module_init(srp_init_module);
3793 module_exit(srp_cleanup_module);