// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
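
/*
 * fcloop is a loopback "LLDD": it wires the NVMe-FC host transport
 * (nvme-fc) directly to the NVMe-FC target transport (nvmet-fc) in
 * software, so FC-NVME can be exercised without fabric hardware.
 * Ports are created and deleted through the sysfs attributes registered
 * at module init (see fcloop_init() below).
 */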

enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
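
/*
 * An illustrative (hypothetical) option string accepted by the parsers
 * below, as written to the sysfs attributes at the bottom of this file:
 *
 *	"wwnn=0x20000090fadd0000,wwpn=0x10000090fadd0000,roles=0x3,fcaddr=0x20"
 *
 * WWN values must be "0x" followed by exactly NVME_FC_TRADDR_HEXNAMELEN
 * hex digits; fcloop_verify_addr() enforces this.
 */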

static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
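
/*
 * Reduced parser used by the del_local_port/del_remote_port/
 * del_target_port attributes: only wwnn= and wwpn= are meaningful
 * here, and both must be supplied.
 */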
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	spinlock_t		inilock;
};
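
/*
 * Per-I/O bookkeeping, in brief: the host side owns a fcloop_ini_fcpreq
 * (carved out of the nvmefc_fcp_req private area), the target side a
 * kref-counted fcloop_fcpreq. inilock guards the link between the two
 * so an abort can atomically detach them, while inistate tracks the
 * INI_IO_START/ACTIVE/ABORTED/COMPLETED state machine under reqlock.
 */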

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		schedule_work(&rport->ls_work);
	}

	return 0;
}
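
/*
 * The t2h (target-to-host) LS path below mirrors the h2t path above:
 * completed requests are queued on the tport's ls_list and completed
 * from work context so the ->done callback never runs under the lock.
 */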
static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		schedule_work(&tport->ls_work);
	}

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

/*
 * Simulate reception of RSCN and converting it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}
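
/*
 * Reference counting convention: the kref taken in fcloop_fcp_req()
 * stands for the I/O itself and is dropped above; an abort takes one
 * extra reference (fcloop_fcp_abort()) that is dropped either by the
 * abort work item or, if the I/O already completed, by the abort path.
 */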
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irq(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}
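
/*
 * In place of real DMA, fcloop_fcp_copy_data() below walks the target's
 * scatterlist (data_sg) and the host's scatterlist (io_sg) in lockstep,
 * memcpy()ing min-sized runs in the direction selected by op.
 */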
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock_irq(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irq(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock_irq(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock_irq(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
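
/*
 * Abort plumbing: fcloop_tgt_fcp_abort() and fcloop_fcp_req_release()
 * below are the nvmet-fc (target) side callbacks; fcloop_fcp_abort()
 * further below is the nvme-fc (host) side entry point.
 */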
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irq(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock_irq(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irq(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irq(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock_irq(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};
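
/*
 * The two templates above are what bind everything together: the host
 * template is handed to nvme_fc_register_localport() and the target
 * template to nvmet_fc_register_targetport() in the sysfs store
 * handlers below, with fcloop's per-port structs carved out of the
 * transports' private areas via the *_priv_sz fields.
 */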
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
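
/*
 * A minimal usage sketch (the WWN values are illustrative only). The
 * attributes appear under the "ctl" device of the "fcloop" class
 * created in fcloop_init(), i.e. /sys/class/fcloop/ctl/:
 *
 *	echo "wwnn=0x...,wwpn=0x..." > /sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x...,wwpn=0x..." > /sys/class/fcloop/ctl/add_target_port
 *	echo "wwnn=0x...,wwpn=0x...,lpwwnn=0x...,lpwwpn=0x..." > \
 *		/sys/class/fcloop/ctl/add_remote_port
 */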
static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static const struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}
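
/*
 * Teardown order matters in fcloop_exit(): nports (and with them any
 * remote/target ports) are torn down before the local ports, and each
 * unregistration happens outside fcloop_lock since it may sleep.
 */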
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");