// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include <scsi/scsi_transport_fc.h>
/* *************************** Data Structures/Defines ****************** */

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnects
						 * when connected and a
						 * connection failure occurs.
						 */
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u64			connection_id;
	atomic_t		csn;
	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
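/*
 * Editorial note (not in the original source): the __aligned(sizeof(u64))
 * annotations on the structures in this file exist because the transport
 * co-allocates LLDD-private storage immediately after the structure and
 * hands out &rec[1] as the private pointer; the annotation keeps that
 * tail storage naturally aligned. The pattern, with hypothetical names:
 *
 *	rec = kmalloc(sizeof(*rec) + priv_sz, GFP_KERNEL);
 *	if (rec)
 *		rec->private = priv_sz ? &rec[1] : NULL;
 */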
enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};
struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
	u32			flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport		*rport;
	struct nvmefc_ls_rsp		*lsrsp;
	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	bool				handled;
	dma_addr_t			rspdma;
	struct list_head		lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};
struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};
struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};
static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
/* *************************** Globals **************************** */

static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);
/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}
static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
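/*
 * Editorial note (not in the original source): lport, rport and ctrl
 * objects share this guarded-reference discipline. An object found on a
 * list under nvme_fc_lock is only safe to use if its kref can still be
 * taken:
 *
 *	if (!nvme_fc_lport_get(lport))
 *		return NULL;
 *
 * A failed get means the refcount already hit zero and teardown is in
 * progress; every successful get must be balanced by nvme_fc_lport_put(),
 * whose final drop frees the object via nvme_fc_free_lport().
 */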
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}
/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed. If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
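/*
 * Illustrative usage sketch (not part of this file; names such as
 * example_tmpl and example_pinfo are hypothetical): an LLDD typically
 * registers its host port at probe time and keeps the returned pointer
 * for later remoteport registrations and eventual deregistration.
 *
 *	static struct nvme_fc_local_port *example_lport;
 *
 *	ret = nvme_fc_register_localport(&example_pinfo, &example_tmpl,
 *					 &pdev->dev, &example_lport);
 *	if (ret)
 *		return ret;
 */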
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being updated:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
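/*
 * Editorial aside: the sizing arithmetic above can be checked at compile
 * time - the longest prefix (19) plus the fixed-format traddr (43) plus
 * a NUL terminator must fit in the 64-byte buffers below.
 */
static_assert(19 + 43 + 1 <= FCNVME_TRADDR_LENGTH);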
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}
static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}
static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}
static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
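/*
 * Illustrative usage sketch (hypothetical names): after fabric discovery
 * finds a subsystem port, the LLDD registers it against the localport it
 * was seen on. Leaving pinfo.dev_loss_tmo at 0 picks up
 * NVME_FC_DEFAULT_DEV_LOSS_TMO via __nvme_fc_set_dev_loss_tmo().
 *
 *	struct nvme_fc_port_info rpinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_TARGET,
 *		.port_id   = d_id,
 *	};
 *	struct nvme_fc_remote_port *rport;
 *
 *	ret = nvme_fc_register_remoteport(example_lport, &rpinfo, &rport);
 */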
static void
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}
/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
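/*
 * Editorial note (not in the original source): on connectivity loss an
 * LLDD only unregisters the port; whether controllers reconnect or get
 * deleted is then driven by dev_loss_tmo and ctlr_loss_tmo inside the
 * transport. The LLDD's remoteport private storage remains valid until
 * its ->remoteport_delete() callback fires, which - per the
 * act_ctrl_cnt check above - may be deferred until the last active
 * association is gone.
 */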
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
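/*
 * Illustrative call (hypothetical context): an LLDD mirroring a sysfs
 * dev_loss_tmo change into the transport would simply do
 *
 *	ret = nvme_fc_set_remoteport_devloss(remoteport, 30);
 *
 * where 0 is also accepted and means device loss is treated as
 * immediate.
 */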
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
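/*
 * Editorial illustration of the wrappers above (fcloop case, dev == NULL):
 *
 *	dma_addr_t d = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *		- returns 0 without touching the DMA API
 *	fc_dma_mapping_error(NULL, d)
 *		- returns 0, i.e. the noop mapping never fails
 */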
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				(lsreq->rqstlen + lsreq->rsplen),
				DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				(lsreq->rqstlen + lsreq->rsplen),
				DMA_BIDIRECTIONAL);

out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}
static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * wait until driver calls back. lldd responsible for
		 * the timeout action
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}
static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	unsigned long flags;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = &assoc_acc[1];
	else
		lsreq->private = NULL;

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create Association LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		spin_lock_irqsave(&ctrl->lock, flags);
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&conn_acc[1];
	else
		lsreq->private = NULL;

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create I/O Connection LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect I/O queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Disconnect Association "
			"failed: ENOMEM\n",
			ctrl->cnum);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				ctrl->association_id);

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);
}
static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);
	list_del(&lsop->lsrcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	kfree(lsop);

	nvme_fc_rport_put(rport);
}
static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	int ret;

	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
				  sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
				     lsop->lsrsp);
	if (ret) {
		dev_warn(lport->dev,
			"LLDD rejected LS RSP xmt: LS %d status %d\n",
			w0->ls_cmd, ret);
		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
		return;
	}
}
static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct nvme_fc_ctrl *ctrl, *ret = NULL;
	struct nvmefc_ls_rcv_op *oldls = NULL;
	u64 association_id = be64_to_cpu(rqst->associd.association_id);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!nvme_fc_ctrl_get(ctrl))
			continue;
		spin_lock(&ctrl->lock);
		if (association_id == ctrl->association_id) {
			oldls = ctrl->rcv_disconn;
			ctrl->rcv_disconn = lsop;
			ret = ctrl;
		}
		spin_unlock(&ctrl->lock);
		if (ret)
			/* leave the ctrl get reference */
			break;
		nvme_fc_ctrl_put(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	/* transmit a response for anything that was pending */
	if (oldls) {
		dev_info(rport->lport->dev,
			"NVME-FC{%d}: Multiple Disconnect Association "
			"LS's received\n", ctrl->cnum);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*oldls->rspbuf),
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvme_fc_xmt_ls_rsp(oldls);
	}

	return ret;
}
/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 * association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
					&lsop->rspbuf->rsp_dis_assoc;
	struct nvme_fc_ctrl *ctrl = NULL;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association */
		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
		if (!ctrl)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_info(rport->lport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
					sizeof(*acc), rqst->w0.ls_cmd,
					(ret == VERR_NO_ASSOC) ?
						FCNVME_RJT_RC_INV_ASSOC :
						FCNVME_RJT_RC_LOGIC,
					FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format an ACCept response */

	lsop->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * the transmit of the response will occur after the exchanges
	 * for the association have been ABTS'd by
	 * nvme_fc_delete_association().
	 */

	/* fail the association */
	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");

	/* release the reference taken by nvme_fc_match_disconn_ls() */
	nvme_fc_ctrl_put(ctrl);

	return false;
}
/*
 * Actual Processing routine for received FC-NVME LS Requests from the LLD
 * returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously.
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	bool ret = true;

	lsop->lsrsp->nvme_fc_private = lsop;
	lsop->lsrsp->rspbuf = lsop->rspbuf;
	lsop->lsrsp->rspdma = lsop->rspdma;
	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
	/* Be preventative. Handlers will later set to valid length. */
	lsop->lsrsp->rsplen = 0;

	/*
	 * handlers:
	 * parse request input, execute the request, and format the
	 * LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_DISCONNECT_ASSOC:
		ret = nvme_fc_ls_disconnect_assoc(lsop);
		break;
	case FCNVME_LS_DISCONNECT_CONN:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
		break;
	case FCNVME_LS_CREATE_ASSOCIATION:
	case FCNVME_LS_CREATE_CONNECTION:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
		break;
	default:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
		break;
	}

	return ret;
}
static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvme_fc_rport *rport =
		container_of(work, struct nvme_fc_rport, lsrcv_work);
	struct fcnvme_ls_rqst_w0 *w0;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	bool sendrsp;

restart:
	sendrsp = true;
	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
		if (lsop->handled)
			continue;

		lsop->handled = true;
		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
			spin_unlock_irqrestore(&rport->lock, flags);
			sendrsp = nvme_fc_handle_ls_rqst(lsop);
		} else {
			spin_unlock_irqrestore(&rport->lock, flags);
			w0 = &lsop->rqstbuf->w0;
			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
						lsop->rspbuf,
						sizeof(*lsop->rspbuf),
						w0->ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		}
		if (sendrsp)
			nvme_fc_xmt_ls_rsp(lsop);
		goto restart;
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
/**
 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 *                      upon the reception of a NVME LS request.
 *
 * The nvme-fc layer will copy payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @portptr:    pointer to the (registered) remote port that the LS
 *              was received from. The remoteport is associated with
 *              a specific localport.
 * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
 *              used to reference the exchange corresponding to the LS
 *              when issuing an ls response.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	int ret;

	nvme_fc_rport_get(rport);

	/* validate there's a routine to transmit a response */
	if (!lport->ops->xmt_ls_rsp) {
		dev_info(lport->dev,
			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EINVAL;
		goto out_put;
	}

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(lport->dev,
			"RCV %s LS failed: payload too large\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -E2BIG;
		goto out_put;
	}

	lsop = kzalloc(sizeof(*lsop) +
			sizeof(union nvmefc_ls_requests) +
			sizeof(union nvmefc_ls_responses),
			GFP_KERNEL);
	if (!lsop) {
		dev_info(lport->dev,
			"RCV %s LS failed: No memory\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -ENOMEM;
		goto out_put;
	}
	lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
	lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];

	lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
					sizeof(*lsop->rspbuf),
					DMA_TO_DEVICE);
	if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
		dev_info(lport->dev,
			"RCV %s LS failed: DMA mapping failure\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EFAULT;
		goto out_free;
	}

	lsop->rport = rport;
	lsop->lsrsp = lsrsp;

	memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
	lsop->rqstdatalen = lsreqbuf_len;

	spin_lock_irqsave(&rport->lock, flags);
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		ret = -ENOTCONN;
		goto out_unmap;
	}
	list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	schedule_work(&rport->lsrcv_work);

	return 0;

out_unmap:
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
	kfree(lsop);
out_put:
	nvme_fc_rport_put(rport);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
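/*
 * Illustrative LLDD-side sketch (hypothetical names): from its LS receive
 * path a driver hands the raw payload to the transport and may recycle
 * the wire buffer as soon as the call returns, since nvme_fc_rcv_ls_req()
 * copies it into its own allocation:
 *
 *	ret = nvme_fc_rcv_ls_req(remoteport, &exch->ls_rsp, rx_buf, rx_len);
 *	if (ret)
 *		example_abort_exchange(exch);
 */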
/* *********************** NVME Ctrl Routines **************************** */

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
		op->flags |= FCOP_FLAGS_TERMIO;
		ctrl->iocnt++;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}
static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}
static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
		    op->flags & FCOP_FLAGS_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}
static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, ioerr_work);

	nvme_fc_error_recovery(ctrl, "transport detected io error");
}
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid, cqe.sqhd, cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
	else if (freq->status) {
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to lldd error %d\n",
			ctrl->cnum, freq->status);
	}

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */
	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
		    be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad transfer "
				"length: %d vs expected %d\n",
				ctrl->cnum, freq->transferred_length,
				be32_to_cpu(op->cmd_iu.data_len));
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.ersp_result ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
				"iu len %d, xfr len %d vs %d, status code "
				"%d, cmdid %d vs %d\n",
				ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
				be32_to_cpu(op->rsp_iu.xfrd_len),
				freq->transferred_length,
				op->rsp_iu.ersp_result,
				sqe->common.command_id,
				cqe->command_id);
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
			"len %d\n",
			ctrl->cnum, freq->rcv_rsplen);
		break;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	if (!nvme_try_complete_req(rq, status, result))
		nvme_fc_complete_rq(rq);

check_error:
	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->format_id = NVME_CMD_FORMAT_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	if (queue->qnum)
		cmdiu->rsv_cat = fccmnd_set_cat_css(0,
					(NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
	else
		cmdiu->rsv_cat = fccmnd_set_cat_admin(0);

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = op->sgl;
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	return res;
}
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private = NULL;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (ctrl->lport->ops->fcprqst_priv_sz) {
			private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
			if (!private)
				return -ENOMEM;
		}

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}
static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	cancel_work_sync(&ctrl->ctrl.async_event_work);
	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}
static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}
static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 0);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}
/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	atomic_set(&queue->csn, 0);
}
2269 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2270 struct nvme_fc_queue *queue, unsigned int qidx)
2272 if (ctrl->lport->ops->delete_queue)
2273 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2274 queue->lldd_handle);
2275 queue->lldd_handle = NULL;
2279 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2283 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2284 nvme_fc_free_queue(&ctrl->queues[i]);
2288 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2289 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
2293 queue->lldd_handle = NULL;
2294 if (ctrl->lport->ops->create_queue)
2295 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2296 qidx, qsize, &queue->lldd_handle);
2302 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2304 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2307 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2308 __nvme_fc_delete_hw_queue(ctrl, queue, i);
2312 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2314 struct nvme_fc_queue *queue = &ctrl->queues[1];
2317 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2318 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2327 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2332 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2336 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2337 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, (qsize / 5));
2341 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
2345 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2352 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2356 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2357 nvme_fc_init_queue(ctrl, i);
2361 nvme_fc_ctrl_free(struct kref *ref)
2363 struct nvme_fc_ctrl *ctrl =
2364 container_of(ref, struct nvme_fc_ctrl, ref);
2365 unsigned long flags;
2367 if (ctrl->ctrl.tagset) {
2368 blk_cleanup_queue(ctrl->ctrl.connect_q);
2369 blk_mq_free_tag_set(&ctrl->tag_set);
2372 /* remove from rport list */
2373 spin_lock_irqsave(&ctrl->rport->lock, flags);
2374 list_del(&ctrl->ctrl_list);
2375 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2377 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2378 blk_cleanup_queue(ctrl->ctrl.admin_q);
2379 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
2380 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2382 kfree(ctrl->queues);
2384 put_device(ctrl->dev);
2385 nvme_fc_rport_put(ctrl->rport);
2387 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2388 if (ctrl->ctrl.opts)
2389 nvmf_free_options(ctrl->ctrl.opts);
2394 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2396 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2400 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2402 return kref_get_unless_zero(&ctrl->ref);
2406 * All accesses from nvme core layer done - can now free the
2407 * controller. Called after last nvme_put_ctrl() call
2410 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2412 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2414 WARN_ON(nctrl != &ctrl->ctrl);
2416 nvme_fc_ctrl_put(ctrl);
2420 * This routine is used by the transport when it needs to find active
2421 * io on a queue that is to be terminated. The transport uses
2422 * blk_mq_tagset_busy_iter() to find the busy requests, which then
2423 * invokes this routine to kill them one by one.
2425 * As FC allocates FC exchange for each io, the transport must contact
2426 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2427 * After terminating the exchange the LLDD will call the transport's
2428 * normal io done path for the request, but it will have an aborted
2429 * status. The done path will return the io request back to the block
2430 * layer with an error status.
2433 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2435 struct nvme_ctrl *nctrl = data;
2436 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2437 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2439 op->nreq.flags |= NVME_REQ_CANCELLED;
2440 __nvme_fc_abort_op(ctrl, op);
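/*
 * Note: this callback only runs under blk_mq_tagset_busy_iter(), as
 * invoked from __nvme_fc_abort_outstanding_ios() below; each busy
 * request gets exactly one abort attempt via __nvme_fc_abort_op().
 */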
2445 * This routine runs through all outstanding commands on the association
2446 * and aborts them. This routine is typically called by the
2447 * delete_association routine. It is also called due to an error during
2448 * reconnect. In that scenario, it is most likely a command that initializes
2449 * the controller, including fabric Connect commands on io queues, that
2450 * may have timed out or failed, so the io must be killed for the connect
2451 * thread to see the error.
2454 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
2459 * if aborting io, the queues are no longer good, mark them all as not live.
2462 if (ctrl->ctrl.queue_count > 1) {
2463 for (q = 1; q < ctrl->ctrl.queue_count; q++)
2464 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
2466 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2469 * If io queues are present, stop them and terminate all outstanding
2470 * ios on them. As FC allocates FC exchange for each io, the
2471 * transport must contact the LLDD to terminate the exchange,
2472 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2473 * to tell us which ios are busy and invoke a transport routine
2474 * to kill them with the LLDD. After terminating the exchange
2475 * the LLDD will call the transport's normal io done path, but it
2476 * will have an aborted status. The done path will return the
2477 * io requests back to the block layer as part of normal completions
2478 * (but with error status).
2480 if (ctrl->ctrl.queue_count > 1) {
2481 nvme_stop_queues(&ctrl->ctrl);
2482 nvme_sync_io_queues(&ctrl->ctrl);
2483 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2484 nvme_fc_terminate_exchange, &ctrl->ctrl);
2485 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2487 nvme_start_queues(&ctrl->ctrl);
2491 * Other transports, which don't have link-level contexts bound
2492 * to sqe's, would try to gracefully shutdown the controller by
2493 * writing the registers for shutdown and polling (call
2494 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2495 * just aborted and we will wait on those contexts, and given
2496 * there was no indication of how live the controller is on the
2497 * link, don't send more io to create more contexts for the
2498 * shutdown. Let the controller fail via keepalive failure if
2499 * it's still present.
2503 * clean up the admin queue. Same thing as above.
2505 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2506 blk_sync_queue(ctrl->ctrl.admin_q);
2507 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2508 nvme_fc_terminate_exchange, &ctrl->ctrl);
2509 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2513 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2516 * if an error (io timeout, etc) occurred while (re)connecting, or
2517 * the remote port requested termination of the association
2518 * (disconnect_ls), or an error (timeout or abort) occurred on an io
2519 * while creating the controller, abort any ios on the association
2520 * and let the create_association error path resolve things.
2522 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2523 __nvme_fc_abort_outstanding_ios(ctrl, true);
2524 set_bit(ASSOC_FAILED, &ctrl->flags);
2528 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
2529 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2532 dev_warn(ctrl->ctrl.device,
2533 "NVME-FC{%d}: transport association event: %s\n",
2534 ctrl->cnum, errmsg);
2535 dev_warn(ctrl->ctrl.device,
2536 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2538 nvme_reset_ctrl(&ctrl->ctrl);
2541 static enum blk_eh_timer_return
2542 nvme_fc_timeout(struct request *rq, bool reserved)
2544 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2545 struct nvme_fc_ctrl *ctrl = op->ctrl;
2546 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2547 struct nvme_command *sqe = &cmdiu->sqe;
2550 * Attempt to abort the offending command. Command completion
2551 * will detect the aborted io and will fail the connection.
2553 dev_info(ctrl->ctrl.device,
2554 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
2556 ctrl->cnum, op->queue->qnum, sqe->common.opcode,
2557 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
2558 if (__nvme_fc_abort_op(ctrl, op))
2559 nvme_fc_error_recovery(ctrl, "io timeout abort failed");
2562 * the io abort has been initiated. Have the reset timer
2563 * restarted and the abort completion will complete the io
2564 * shortly. Avoids a synchronous wait while the abort finishes.
2566 return BLK_EH_RESET_TIMER;
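/*
 * Note: returning BLK_EH_RESET_TIMER only re-arms the request timer;
 * the request itself completes later, either through the LLDD's
 * aborted completion or through the error recovery initiated above.
 */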
2570 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2571 struct nvme_fc_fcp_op *op)
2573 struct nvmefc_fcp_req *freq = &op->fcp_req;
2578 if (!blk_rq_nr_phys_segments(rq))
2581 freq->sg_table.sgl = freq->first_sgl;
2582 ret = sg_alloc_table_chained(&freq->sg_table,
2583 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2584 NVME_INLINE_SG_CNT);
2588 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2589 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2590 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2591 op->nents, rq_dma_dir(rq));
2592 if (unlikely(freq->sg_cnt <= 0)) {
2593 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2599 * TODO: blk_integrity_rq(rq) for DIF
2605 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2606 struct nvme_fc_fcp_op *op)
2608 struct nvmefc_fcp_req *freq = &op->fcp_req;
2613 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2616 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
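/*
 * Note: the unmap path mirrors the map path above exactly -
 * fc_dma_unmap_sg() undoes fc_dma_map_sg(), and
 * sg_free_table_chained() releases any sg entries chained beyond the
 * NVME_INLINE_SG_CNT inline ones allocated in nvme_fc_map_data().
 */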
2622 * In FC, the queue is a logical thing. At transport connect, the target
2623 * creates its "queue" and returns a handle that is to be given to the
2624 * target whenever it posts something to the corresponding SQ. When an
2625 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
2626 * command contained within the SQE, an io, and assigns a FC exchange
2627 * to it. The SQE and the associated SQ handle are sent in the initial
2628 * CMD IU sent on the exchange. All transfers relative to the io occur
2629 * as part of the exchange. The CQE is the last thing for the io,
2630 * which is transferred (explicitly or implicitly) with the RSP IU
2631 * sent on the exchange. After the CQE is received, the FC exchange is
2632 * terminated and the exchange may be used on a different io.
2634 * The transport to LLDD api has the transport making a request for a
2635 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
2636 * resource and transfers the command. The LLDD will then process all
2637 * steps to complete the io. Upon completion, the transport done routine is called.
2640 * So - while the operation is outstanding to the LLDD, there is a link
2641 * level FC exchange resource that is also outstanding. This must be
2642 * considered in all cleanup operations.
2645 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2646 struct nvme_fc_fcp_op *op, u32 data_len,
2647 enum nvmefc_fcp_datadir io_dir)
2649 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2650 struct nvme_command *sqe = &cmdiu->sqe;
2654 * before attempting to send the io, check to see if we believe
2655 * the target device is present
2657 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2658 return BLK_STS_RESOURCE;
2660 if (!nvme_fc_ctrl_get(ctrl))
2661 return BLK_STS_IOERR;
2663 /* format the FC-NVME CMD IU and fcp_req */
2664 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2665 cmdiu->data_len = cpu_to_be32(data_len);
2667 case NVMEFC_FCP_WRITE:
2668 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2670 case NVMEFC_FCP_READ:
2671 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2673 case NVMEFC_FCP_NODATA:
2677 op->fcp_req.payload_length = data_len;
2678 op->fcp_req.io_dir = io_dir;
2679 op->fcp_req.transferred_length = 0;
2680 op->fcp_req.rcv_rsplen = 0;
2681 op->fcp_req.status = NVME_SC_SUCCESS;
2682 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2685 * validate per fabric rules, set fields mandated by fabric spec
2686 * as well as those by FC-NVME spec.
2688 WARN_ON_ONCE(sqe->common.metadata);
2689 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2692 * format SQE DPTR field per FC-NVME rules:
2693 * type=0x5 Transport SGL Data Block Descriptor
2694 * subtype=0xA Transport-specific value
2696 * address=0; length=length of the data series
2698 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2699 NVME_SGL_FMT_TRANSPORT_A;
2700 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2701 sqe->rw.dptr.sgl.addr = 0;
2703 if (!(op->flags & FCOP_FLAGS_AEN)) {
2704 ret = nvme_fc_map_data(ctrl, op->rq, op);
2705 if (ret) {
2706 nvme_cleanup_cmd(op->rq);
2707 nvme_fc_ctrl_put(ctrl);
2708 if (ret == -ENOMEM || ret == -EAGAIN)
2709 return BLK_STS_RESOURCE;
2710 return BLK_STS_IOERR;
2714 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2715 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2717 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2719 if (!(op->flags & FCOP_FLAGS_AEN))
2720 blk_mq_start_request(op->rq);
2722 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2723 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2724 &ctrl->rport->remoteport,
2725 queue->lldd_handle, &op->fcp_req);
2729 * If the lld fails to send the command, is there an issue with
2730 * the csn value? If the command that fails is the Connect,
2731 * no - as the connection won't be live. If it is a command
2732 * post-connect, it's possible a gap in csn may be created.
2733 * Does this matter? As Linux initiators don't send fused
2734 * commands, no. The gap would exist, but as there's nothing
2735 * that depends on csn order to be delivered on the target
2736 * side, it shouldn't hurt. It would be difficult for a
2737 * target to even detect the csn gap as it has no idea when the
2738 * cmd with the csn was supposed to arrive.
2740 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2741 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2743 if (!(op->flags & FCOP_FLAGS_AEN)) {
2744 nvme_fc_unmap_data(ctrl, op->rq, op);
2745 nvme_cleanup_cmd(op->rq);
2748 nvme_fc_ctrl_put(ctrl);
2750 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2751 ret != -EBUSY)
2752 return BLK_STS_IOERR;
2754 return BLK_STS_RESOURCE;
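/*
 * Illustrative sketch (not part of the driver): the DPTR descriptor
 * formatted above packs type 0x5 (Transport SGL Data Block) into the
 * high nibble and subtype 0xA (transport-specific) into the low
 * nibble of the descriptor type byte, i.e. 0x5a.
 * nvme_fc_sgl_is_transport_a() is a hypothetical helper name.
 */
static inline bool __maybe_unused
nvme_fc_sgl_is_transport_a(const struct nvme_sgl_desc *sgl)
{
	return sgl->type == ((NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			     NVME_SGL_FMT_TRANSPORT_A);	/* 0x5a */
}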
2761 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2762 const struct blk_mq_queue_data *bd)
2764 struct nvme_ns *ns = hctx->queue->queuedata;
2765 struct nvme_fc_queue *queue = hctx->driver_data;
2766 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2767 struct request *rq = bd->rq;
2768 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2769 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2770 struct nvme_command *sqe = &cmdiu->sqe;
2771 enum nvmefc_fcp_datadir io_dir;
2772 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2776 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2777 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2778 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2780 ret = nvme_setup_cmd(ns, rq, sqe);
2785 * nvme core doesn't quite treat the rq opaquely. Commands such
2786 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2787 * there is no actual payload to be transferred.
2788 * To get it right, key data transmission on there being 1 or
2789 * more physical segments in the sg list. If there is no
2790 * physical segments, there is no payload.
2792 if (blk_rq_nr_phys_segments(rq)) {
2793 data_len = blk_rq_payload_bytes(rq);
2794 io_dir = ((rq_data_dir(rq) == WRITE) ?
2795 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2798 io_dir = NVMEFC_FCP_NODATA;
2802 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2806 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2808 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2809 struct nvme_fc_fcp_op *aen_op;
2812 if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2815 aen_op = &ctrl->aen_ops[0];
2817 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, NVMEFC_FCP_NODATA);
2820 dev_err(ctrl->ctrl.device,
2821 "failed async event work\n");
2825 nvme_fc_complete_rq(struct request *rq)
2827 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2828 struct nvme_fc_ctrl *ctrl = op->ctrl;
2830 atomic_set(&op->state, FCPOP_STATE_IDLE);
2831 op->flags &= ~FCOP_FLAGS_TERMIO;
2833 nvme_fc_unmap_data(ctrl, rq, op);
2834 nvme_complete_rq(rq);
2835 nvme_fc_ctrl_put(ctrl);
2839 static const struct blk_mq_ops nvme_fc_mq_ops = {
2840 .queue_rq = nvme_fc_queue_rq,
2841 .complete = nvme_fc_complete_rq,
2842 .init_request = nvme_fc_init_request,
2843 .exit_request = nvme_fc_exit_request,
2844 .init_hctx = nvme_fc_init_hctx,
2845 .timeout = nvme_fc_timeout,
2849 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2851 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2852 unsigned int nr_io_queues;
2855 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2856 ctrl->lport->ops->max_hw_queues);
2857 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2859 dev_info(ctrl->ctrl.device,
2860 "set_queue_count failed: %d\n", ret);
2864 ctrl->ctrl.queue_count = nr_io_queues + 1;
2868 nvme_fc_init_io_queues(ctrl);
2870 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2871 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2872 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2873 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2874 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2875 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2876 ctrl->tag_set.cmd_size =
2877 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2878 ctrl->lport->ops->fcprqst_priv_sz);
2879 ctrl->tag_set.driver_data = ctrl;
2880 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2881 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2883 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2887 ctrl->ctrl.tagset = &ctrl->tag_set;
2889 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2890 if (IS_ERR(ctrl->ctrl.connect_q)) {
2891 ret = PTR_ERR(ctrl->ctrl.connect_q);
2892 goto out_free_tag_set;
2895 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2897 goto out_cleanup_blk_queue;
2899 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2901 goto out_delete_hw_queues;
2903 ctrl->ioq_live = true;
2907 out_delete_hw_queues:
2908 nvme_fc_delete_hw_io_queues(ctrl);
2909 out_cleanup_blk_queue:
2910 blk_cleanup_queue(ctrl->ctrl.connect_q);
2912 blk_mq_free_tag_set(&ctrl->tag_set);
2913 nvme_fc_free_io_queues(ctrl);
2915 /* force put free routine to ignore io queues */
2916 ctrl->ctrl.tagset = NULL;
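/*
 * Note on the cmd_size computation above: struct_size() sizes the
 * per-request pdu as sizeof(struct nvme_fcp_op_w_sgl) plus the
 * LLDD's private area (the 'priv' flexible array member). For an
 * assumed fcprqst_priv_sz of, say, 64 bytes, every blk-mq request
 * carries a 64-byte LLDD scratch area behind the transport's op
 * state and inline sgl.
 */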
2922 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2924 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2925 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2926 unsigned int nr_io_queues;
2929 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2930 ctrl->lport->ops->max_hw_queues);
2931 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2933 dev_info(ctrl->ctrl.device,
2934 "set_queue_count failed: %d\n", ret);
2938 if (!nr_io_queues && prior_ioq_cnt) {
2939 dev_info(ctrl->ctrl.device,
2940 "Fail Reconnect: At least 1 io queue "
2941 "required (was %d)\n", prior_ioq_cnt);
2945 ctrl->ctrl.queue_count = nr_io_queues + 1;
2946 /* nothing more to do if no io queues exist */
2947 if (ctrl->ctrl.queue_count == 1)
2950 if (prior_ioq_cnt != nr_io_queues) {
2951 dev_info(ctrl->ctrl.device,
2952 "reconnect: revising io queue count from %d to %d\n",
2953 prior_ioq_cnt, nr_io_queues);
2954 nvme_wait_freeze(&ctrl->ctrl);
2955 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2956 nvme_unfreeze(&ctrl->ctrl);
2959 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2961 goto out_free_io_queues;
2963 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2965 goto out_delete_hw_queues;
2969 out_delete_hw_queues:
2970 nvme_fc_delete_hw_io_queues(ctrl);
2972 nvme_fc_free_io_queues(ctrl);
2977 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2979 struct nvme_fc_lport *lport = rport->lport;
2981 atomic_inc(&lport->act_rport_cnt);
2985 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2987 struct nvme_fc_lport *lport = rport->lport;
2990 cnt = atomic_dec_return(&lport->act_rport_cnt);
2991 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2992 lport->ops->localport_delete(&lport->localport);
2996 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
2998 struct nvme_fc_rport *rport = ctrl->rport;
3001 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
3004 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
3006 nvme_fc_rport_active_on_lport(rport);
3012 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
3014 struct nvme_fc_rport *rport = ctrl->rport;
3015 struct nvme_fc_lport *lport = rport->lport;
3018 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
3020 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
3022 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
3023 lport->ops->remoteport_delete(&rport->remoteport);
3024 nvme_fc_rport_inactive_on_lport(rport);
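/*
 * Note: the active counts above chain object lifetimes together: the
 * last active controller on a DELETED remoteport triggers
 * remoteport_delete(), and the last active remoteport on a DELETED
 * localport triggers localport_delete(), so the LLDD is called back
 * only once nothing depends on the port anymore.
 */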
3031 * This routine restarts the controller on the host side, and
3032 * on the link side, recreates the controller association.
3035 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
3037 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
3038 struct nvmefc_ls_rcv_op *disls = NULL;
3039 unsigned long flags;
3043 ++ctrl->ctrl.nr_reconnects;
3045 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3048 if (nvme_fc_ctlr_active_on_rport(ctrl))
3051 dev_info(ctrl->ctrl.device,
3052 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
3053 " rport wwpn 0x%016llx: NQN \"%s\"\n",
3054 ctrl->cnum, ctrl->lport->localport.port_name,
3055 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
3057 clear_bit(ASSOC_FAILED, &ctrl->flags);
3060 * Create the admin queue
3063 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, NVME_AQ_DEPTH);
3066 goto out_free_queue;
3068 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
3069 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
3071 goto out_delete_hw_queue;
3073 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3075 goto out_disconnect_admin_queue;
3077 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3080 * Check controller capabilities
3082 * todo: add code to check whether ctrl attributes changed from
3083 * prior connection values
3086 ret = nvme_enable_ctrl(&ctrl->ctrl);
3087 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3088 goto out_disconnect_admin_queue;
3090 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3091 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << (ilog2(SZ_4K) - 9);
3094 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3096 ret = nvme_init_identify(&ctrl->ctrl);
3097 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3098 goto out_disconnect_admin_queue;
3102 /* FC-NVME does not have other data in the capsule */
3103 if (ctrl->ctrl.icdoff) {
3104 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", ctrl->ctrl.icdoff);
3106 goto out_disconnect_admin_queue;
3109 /* FC-NVME supports normal SGL Data Block Descriptors */
3111 if (opts->queue_size > ctrl->ctrl.maxcmd) {
3112 /* warn if maxcmd is lower than queue_size */
3113 dev_warn(ctrl->ctrl.device,
3114 "queue_size %zu > ctrl maxcmd %u, reducing "
3116 opts->queue_size, ctrl->ctrl.maxcmd);
3117 opts->queue_size = ctrl->ctrl.maxcmd;
3120 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
3121 /* warn if sqsize is lower than queue_size */
3122 dev_warn(ctrl->ctrl.device,
3123 "queue_size %zu > ctrl sqsize %u, reducing "
3125 opts->queue_size, ctrl->ctrl.sqsize + 1);
3126 opts->queue_size = ctrl->ctrl.sqsize + 1;
3129 ret = nvme_fc_init_aen_ops(ctrl);
3131 goto out_term_aen_ops;
3134 * Create the io queues
3137 if (ctrl->ctrl.queue_count > 1) {
3138 if (!ctrl->ioq_live)
3139 ret = nvme_fc_create_io_queues(ctrl);
3141 ret = nvme_fc_recreate_io_queues(ctrl);
3143 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3144 goto out_term_aen_ops;
3146 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3148 ctrl->ctrl.nr_reconnects = 0;
3151 nvme_start_ctrl(&ctrl->ctrl);
3153 return 0; /* Success */
3156 nvme_fc_term_aen_ops(ctrl);
3157 out_disconnect_admin_queue:
3158 /* send a Disconnect(association) LS to fc-nvme target */
3159 nvme_fc_xmt_disconnect_assoc(ctrl);
3160 spin_lock_irqsave(&ctrl->lock, flags);
3161 ctrl->association_id = 0;
3162 disls = ctrl->rcv_disconn;
3163 ctrl->rcv_disconn = NULL;
3164 spin_unlock_irqrestore(&ctrl->lock, flags);
3166 nvme_fc_xmt_ls_rsp(disls);
3167 out_delete_hw_queue:
3168 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3170 nvme_fc_free_queue(&ctrl->queues[0]);
3171 clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3172 nvme_fc_ctlr_inactive_on_rport(ctrl);
3179 * This routine stops operation of the controller on the host side.
3180 * On the host os stack side: Admin and IO queues are stopped,
3181 * and outstanding ios on them are terminated via FC ABTS.
3182 * On the link side: the association is terminated.
3185 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3187 struct nvmefc_ls_rcv_op *disls = NULL;
3188 unsigned long flags;
3190 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3193 spin_lock_irqsave(&ctrl->lock, flags);
3194 set_bit(FCCTRL_TERMIO, &ctrl->flags);
3196 spin_unlock_irqrestore(&ctrl->lock, flags);
3198 __nvme_fc_abort_outstanding_ios(ctrl, false);
3200 /* kill the aens as they are a separate path */
3201 nvme_fc_abort_aen_ops(ctrl);
3203 /* wait for all io that had to be aborted */
3204 spin_lock_irq(&ctrl->lock);
3205 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3206 clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3207 spin_unlock_irq(&ctrl->lock);
3209 nvme_fc_term_aen_ops(ctrl);
3212 * send a Disconnect(association) LS to fc-nvme target
3213 * Note: could have been sent at top of process, but
3214 * cleaner on link traffic if after the aborts complete.
3215 * Note: if association doesn't exist, association_id will be 0
3217 if (ctrl->association_id)
3218 nvme_fc_xmt_disconnect_assoc(ctrl);
3220 spin_lock_irqsave(&ctrl->lock, flags);
3221 ctrl->association_id = 0;
3222 disls = ctrl->rcv_disconn;
3223 ctrl->rcv_disconn = NULL;
3224 spin_unlock_irqrestore(&ctrl->lock, flags);
3227 * if a Disconnect Request was waiting for a response, send the
3228 * response now that all ABTS's have been issued (and are complete).
3230 nvme_fc_xmt_ls_rsp(disls);
3232 if (ctrl->ctrl.tagset) {
3233 nvme_fc_delete_hw_io_queues(ctrl);
3234 nvme_fc_free_io_queues(ctrl);
3237 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3238 nvme_fc_free_queue(&ctrl->queues[0]);
3240 /* re-enable the admin_q so anything new can fast fail */
3241 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3243 /* resume the io queues so that things will fast fail */
3244 nvme_start_queues(&ctrl->ctrl);
3246 nvme_fc_ctlr_inactive_on_rport(ctrl);
3250 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
3252 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3254 cancel_work_sync(&ctrl->ioerr_work);
3255 cancel_delayed_work_sync(&ctrl->connect_work);
3257 * kill the association on the link side. this will block
3258 * waiting for io to terminate
3260 nvme_fc_delete_association(ctrl);
3264 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3266 struct nvme_fc_rport *rport = ctrl->rport;
3267 struct nvme_fc_remote_port *portptr = &rport->remoteport;
3268 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3271 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
3274 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3275 dev_info(ctrl->ctrl.device,
3276 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
3277 ctrl->cnum, status);
3278 else if (time_after_eq(jiffies, rport->dev_loss_end))
3279 recon = false;
3281 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3282 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3283 dev_info(ctrl->ctrl.device,
3284 "NVME-FC{%d}: Reconnect attempt in %ld "
3286 ctrl->cnum, recon_delay / HZ);
3287 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
3288 recon_delay = rport->dev_loss_end - jiffies;
3290 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3292 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3293 dev_warn(ctrl->ctrl.device,
3294 "NVME-FC{%d}: Max reconnect attempts (%d) "
3296 ctrl->cnum, ctrl->ctrl.nr_reconnects);
3298 dev_warn(ctrl->ctrl.device,
3299 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
3300 "while waiting for remoteport connectivity.\n",
3301 ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3302 (ctrl->ctrl.opts->max_reconnects *
3303 ctrl->ctrl.opts->reconnect_delay)));
3304 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3309 nvme_fc_reset_ctrl_work(struct work_struct *work)
3311 struct nvme_fc_ctrl *ctrl =
3312 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3314 nvme_stop_ctrl(&ctrl->ctrl);
3316 /* will block while waiting for io to terminate */
3317 nvme_fc_delete_association(ctrl);
3319 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3320 dev_err(ctrl->ctrl.device,
3321 "NVME-FC{%d}: error_recovery: Couldn't change state "
3322 "to CONNECTING\n", ctrl->cnum);
3324 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
3325 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3326 dev_err(ctrl->ctrl.device,
3327 "NVME-FC{%d}: failed to schedule connect "
3328 "after reset\n", ctrl->cnum);
3330 flush_delayed_work(&ctrl->connect_work);
3333 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
3338 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3340 .module = THIS_MODULE,
3341 .flags = NVME_F_FABRICS,
3342 .reg_read32 = nvmf_reg_read32,
3343 .reg_read64 = nvmf_reg_read64,
3344 .reg_write32 = nvmf_reg_write32,
3345 .free_ctrl = nvme_fc_nvme_ctrl_freed,
3346 .submit_async_event = nvme_fc_submit_async_event,
3347 .delete_ctrl = nvme_fc_delete_ctrl,
3348 .get_address = nvmf_get_address,
3352 nvme_fc_connect_ctrl_work(struct work_struct *work)
3356 struct nvme_fc_ctrl *ctrl =
3357 container_of(to_delayed_work(work),
3358 struct nvme_fc_ctrl, connect_work);
3360 ret = nvme_fc_create_association(ctrl);
3362 nvme_fc_reconnect_or_delete(ctrl, ret);
3364 dev_info(ctrl->ctrl.device,
3365 "NVME-FC{%d}: controller connect complete\n",
3370 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3371 .queue_rq = nvme_fc_queue_rq,
3372 .complete = nvme_fc_complete_rq,
3373 .init_request = nvme_fc_init_request,
3374 .exit_request = nvme_fc_exit_request,
3375 .init_hctx = nvme_fc_init_admin_hctx,
3376 .timeout = nvme_fc_timeout,
3381 * Fails a controller request if it matches an existing controller
3382 * (association) with the same tuple:
3383 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
3385 * The ports don't need to be compared as they are intrinsically
3386 * already matched by the port pointers supplied.
3389 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3390 struct nvmf_ctrl_options *opts)
3392 struct nvme_fc_ctrl *ctrl;
3393 unsigned long flags;
3396 spin_lock_irqsave(&rport->lock, flags);
3397 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3398 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3402 spin_unlock_irqrestore(&rport->lock, flags);
3407 static struct nvme_ctrl *
3408 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3409 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3411 struct nvme_fc_ctrl *ctrl;
3412 unsigned long flags;
3413 int ret, idx, ctrl_loss_tmo;
3415 if (!(rport->remoteport.port_role &
3416 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3421 if (!opts->duplicate_connect &&
3422 nvme_fc_existing_controller(rport, opts)) {
3427 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3433 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3440 * if ctrl_loss_tmo is being enforced and the default reconnect delay
3441 * is being used, change to a shorter reconnect delay for FC.
3443 if (opts->max_reconnects != -1 &&
3444 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
3445 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
3446 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
3447 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
3448 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3449 opts->reconnect_delay);
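	/*
	 * Worked example: with the default reconnect_delay of 10s and
	 * max_reconnects of 60, ctrl_loss_tmo is 600s; switching to
	 * the 2s FC default yields DIV_ROUND_UP(600, 2) = 300
	 * attempts, preserving the same overall loss window.
	 */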
3452 ctrl->ctrl.opts = opts;
3453 ctrl->ctrl.nr_reconnects = 0;
3455 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3457 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3458 INIT_LIST_HEAD(&ctrl->ctrl_list);
3459 ctrl->lport = lport;
3460 ctrl->rport = rport;
3461 ctrl->dev = lport->dev;
3463 ctrl->ioq_live = false;
3464 init_waitqueue_head(&ctrl->ioabort_wait);
3466 get_device(ctrl->dev);
3467 kref_init(&ctrl->ref);
3469 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3470 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3471 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
3472 spin_lock_init(&ctrl->lock);
3474 /* io queue count */
3475 ctrl->ctrl.queue_count = min_t(unsigned int, opts->nr_io_queues,
3477 lport->ops->max_hw_queues);
3478 ctrl->ctrl.queue_count++; /* +1 for admin queue */
3480 ctrl->ctrl.sqsize = opts->queue_size - 1;
3481 ctrl->ctrl.kato = opts->kato;
3482 ctrl->ctrl.cntlid = 0xffff;
3485 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3486 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3490 nvme_fc_init_queue(ctrl, 0);
3492 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3493 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3494 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3495 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
3496 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3497 ctrl->admin_tag_set.cmd_size =
3498 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3499 ctrl->lport->ops->fcprqst_priv_sz);
3500 ctrl->admin_tag_set.driver_data = ctrl;
3501 ctrl->admin_tag_set.nr_hw_queues = 1;
3502 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3503 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3505 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3507 goto out_free_queues;
3508 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3510 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3511 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3512 ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3513 goto out_free_admin_tag_set;
3516 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3517 if (IS_ERR(ctrl->ctrl.admin_q)) {
3518 ret = PTR_ERR(ctrl->ctrl.admin_q);
3519 goto out_cleanup_fabrics_q;
3523 * Would have been nice to init io queues tag set as well.
3524 * However, we require interaction from the controller
3525 * for max io queue count before we can do so.
3526 * Defer this to the connect path.
3529 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3531 goto out_cleanup_admin_q;
3533 /* at this point, teardown path changes to ref counting on nvme ctrl */
3535 spin_lock_irqsave(&rport->lock, flags);
3536 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3537 spin_unlock_irqrestore(&rport->lock, flags);
3539 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3540 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3541 dev_err(ctrl->ctrl.device,
3542 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3546 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3547 dev_err(ctrl->ctrl.device,
3548 "NVME-FC{%d}: failed to schedule initial connect\n",
3553 flush_delayed_work(&ctrl->connect_work);
3555 dev_info(ctrl->ctrl.device,
3556 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3557 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3562 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3563 cancel_work_sync(&ctrl->ioerr_work);
3564 cancel_work_sync(&ctrl->ctrl.reset_work);
3565 cancel_delayed_work_sync(&ctrl->connect_work);
3567 ctrl->ctrl.opts = NULL;
3569 /* initiate nvme ctrl ref counting teardown */
3570 nvme_uninit_ctrl(&ctrl->ctrl);
3572 /* Remove core ctrl ref. */
3573 nvme_put_ctrl(&ctrl->ctrl);
3575 /* as we're past the point where we transition to the ref
3576 * counting teardown path, if we return a bad pointer here,
3577 * the calling routine, thinking it's prior to the
3578 * transition, will do an rport put. Since the teardown
3579 * path also does a rport put, we do an extra get here so
3580 * that proper order/teardown happens.
3582 nvme_fc_rport_get(rport);
3584 return ERR_PTR(-EIO);
3586 out_cleanup_admin_q:
3587 blk_cleanup_queue(ctrl->ctrl.admin_q);
3588 out_cleanup_fabrics_q:
3589 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
3590 out_free_admin_tag_set:
3591 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3593 kfree(ctrl->queues);
3595 put_device(ctrl->dev);
3596 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3600 /* exit via here doesn't follow ctlr ref points */
3601 return ERR_PTR(ret);
3605 struct nvmet_fc_traddr {
3611 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3615 if (match_u64(sstr, &token64))
3623 * This routine validates and extracts the WWN's from the TRADDR string.
3624 * As kernel parsers need the 0x to determine number base, universally
3625 * build the string to parse with a 0x prefix before parsing name strings.
3628 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3630 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3631 substring_t wwn = { name, &name[sizeof(name)-1] };
3632 int nnoffset, pnoffset;
3634 /* validate that the string is one of the two allowed formats */
3635 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3636 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3637 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3638 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3639 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3640 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3641 NVME_FC_TRADDR_OXNNLEN;
3642 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3643 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3644 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3645 "pn-", NVME_FC_TRADDR_NNLEN))) {
3646 nnoffset = NVME_FC_TRADDR_NNLEN;
3647 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3653 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3655 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3656 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3659 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3660 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3666 pr_warn("%s: bad traddr string\n", __func__);
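/*
 * Example traddr strings accepted above (WWN values illustrative):
 *   "nn-0x20000090fa942779:pn-0x10000090fa942779"   (0x-prefixed form)
 *   "nn-20000090fa942779:pn-10000090fa942779"       (bare-hex form)
 * Both parse to the same 64-bit node and port names.
 */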
3670 static struct nvme_ctrl *
3671 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3673 struct nvme_fc_lport *lport;
3674 struct nvme_fc_rport *rport;
3675 struct nvme_ctrl *ctrl;
3676 struct nvmet_fc_traddr laddr = { 0L, 0L };
3677 struct nvmet_fc_traddr raddr = { 0L, 0L };
3678 unsigned long flags;
3681 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3682 if (ret || !raddr.nn || !raddr.pn)
3683 return ERR_PTR(-EINVAL);
3685 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3686 if (ret || !laddr.nn || !laddr.pn)
3687 return ERR_PTR(-EINVAL);
3689 /* find the host and remote ports to connect together */
3690 spin_lock_irqsave(&nvme_fc_lock, flags);
3691 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3692 if (lport->localport.node_name != laddr.nn ||
3693 lport->localport.port_name != laddr.pn ||
3694 lport->localport.port_state != FC_OBJSTATE_ONLINE)
3697 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3698 if (rport->remoteport.node_name != raddr.nn ||
3699 rport->remoteport.port_name != raddr.pn ||
3700 rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3703 /* if fail to get reference fall through. Will error */
3704 if (!nvme_fc_rport_get(rport))
3707 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3709 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3711 nvme_fc_rport_put(rport);
3715 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3717 pr_warn("%s: %s - %s combination not found\n",
3718 __func__, opts->traddr, opts->host_traddr);
3719 return ERR_PTR(-ENOENT);
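/*
 * Example (illustrative): this is reached through the fabrics layer,
 * e.g. from nvme-cli:
 *   nvme connect -t fc \
 *        -a "nn-0x20000090fa942779:pn-0x10000090fa942779" \
 *        -w "nn-0x20000090fa942771:pn-0x10000090fa942771" \
 *        -n <subsystem NQN>
 * where -a supplies traddr and -w supplies host_traddr as parsed
 * above.
 */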
3723 static struct nvmf_transport_ops nvme_fc_transport = {
3725 .module = THIS_MODULE,
3726 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3727 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3728 .create_ctrl = nvme_fc_create_ctrl,
3731 /* Arbitrary cap on successive reference failures; with many subsystems it could be high */
3732 #define DISCOVERY_MAX_FAIL 20
3734 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3735 struct device_attribute *attr, const char *buf, size_t count)
3737 unsigned long flags;
3738 LIST_HEAD(local_disc_list);
3739 struct nvme_fc_lport *lport;
3740 struct nvme_fc_rport *rport;
3743 spin_lock_irqsave(&nvme_fc_lock, flags);
3745 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3746 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3747 if (!nvme_fc_lport_get(lport))
3749 if (!nvme_fc_rport_get(rport)) {
3751 * This is a temporary condition. Upon restart
3752 * this rport will be gone from the list.
3754 * Revert the lport put and retry. Anything
3755 * added to the list already will be skipped (as
3756 * they are no longer list_empty). Loops should
3757 * resume at rports that were not yet seen.
3759 nvme_fc_lport_put(lport);
3761 if (failcnt++ < DISCOVERY_MAX_FAIL)
3764 pr_err("nvme_discovery: too many reference "
3766 goto process_local_list;
3768 if (list_empty(&rport->disc_list))
3769 list_add_tail(&rport->disc_list, &local_disc_list);
3775 while (!list_empty(&local_disc_list)) {
3776 rport = list_first_entry(&local_disc_list,
3777 struct nvme_fc_rport, disc_list);
3778 list_del_init(&rport->disc_list);
3779 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3781 lport = rport->lport;
3782 /* signal discovery. Won't hurt if it repeats */
3783 nvme_fc_signal_discovery_scan(lport, rport);
3784 nvme_fc_rport_put(rport);
3785 nvme_fc_lport_put(lport);
3787 spin_lock_irqsave(&nvme_fc_lock, flags);
3789 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3793 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
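/*
 * Usage note: writing anything to this attribute, e.g.
 *   echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 * re-signals a discovery scan for every known lport/rport pair; the
 * sysfs path assumes the "fc_udev_device" device name created at
 * module init.
 */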
3795 static struct attribute *nvme_fc_attrs[] = {
3796 &dev_attr_nvme_discovery.attr,
3800 static struct attribute_group nvme_fc_attr_group = {
3801 .attrs = nvme_fc_attrs,
3804 static const struct attribute_group *nvme_fc_attr_groups[] = {
3805 &nvme_fc_attr_group,
3809 static struct class fc_class = {
3811 .dev_groups = nvme_fc_attr_groups,
3812 .owner = THIS_MODULE,
3815 static int __init nvme_fc_init_module(void)
3821 * It is expected that in the future the kernel will combine
3822 * the FC-isms that are currently under scsi and now being
3823 * added to by NVME into a new standalone FC class. The SCSI
3824 * and NVME protocols and their devices would be under this
3827 * As we need something to post FC-specific udev events to,
3828 * specifically for nvme probe events, start by creating the
3829 * new device class. When the new standalone FC class is
3830 * put in place, this code will move to a more generic
3831 * location for the class.
3833 ret = class_register(&fc_class);
3835 pr_err("couldn't register class fc\n");
3840 * Create a device for the FC-centric udev events
3842 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL, "fc_udev_device");
3844 if (IS_ERR(fc_udev_device)) {
3845 pr_err("couldn't create fc_udev device!\n");
3846 ret = PTR_ERR(fc_udev_device);
3847 goto out_destroy_class;
3850 ret = nvmf_register_transport(&nvme_fc_transport);
3852 goto out_destroy_device;
3857 device_destroy(&fc_class, MKDEV(0, 0));
3859 class_unregister(&fc_class);
3865 nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3867 struct nvme_fc_ctrl *ctrl;
3869 spin_lock(&rport->lock);
3870 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3871 dev_warn(ctrl->ctrl.device,
3872 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3874 nvme_delete_ctrl(&ctrl->ctrl);
3876 spin_unlock(&rport->lock);
3879 static void __exit nvme_fc_exit_module(void)
3881 struct nvme_fc_lport *lport;
3882 struct nvme_fc_rport *rport;
3883 unsigned long flags;
3885 spin_lock_irqsave(&nvme_fc_lock, flags);
3886 list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
3887 list_for_each_entry(rport, &lport->endp_list, endp_list)
3888 nvme_fc_delete_controllers(rport);
3889 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3890 flush_workqueue(nvme_delete_wq);
3892 nvmf_unregister_transport(&nvme_fc_transport);
3894 device_destroy(&fc_class, MKDEV(0, 0));
3895 class_unregister(&fc_class);
3898 module_init(nvme_fc_init_module);
3899 module_exit(nvme_fc_exit_module);
3901 MODULE_LICENSE("GPL v2");