/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

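/*
 * fcloop: a software "loopback" FC transport. It registers both an
 * NVMe-FC host (initiator) port and an NVMe-FC target port and wires
 * the two LLDD-facing APIs directly to each other, so the FC host and
 * target transport paths can be exercised without any FC hardware.
 */
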
enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

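/*
 * A port-creation string is a comma-separated list of the tokens
 * above; e.g. (the WWN/address/role values here are only illustrative,
 * not a tested configuration):
 *
 *	"wwnn=0x20000090fa945aaa,wwpn=0x10000090fa945aaa,roles=3,fcaddr=0x442002"
 */
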
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

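/*
 * How the create_*_port handlers below wire these together: an nport
 * pairs one rport (the remote port seen by the host side) with one
 * tport (the target port), both reached through the lport's localport.
 * The nport itself is refcounted via 'ref'.
 */
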
struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	bool				active;
	bool				aborted;
	struct work_struct		work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	struct work_struct		iniwork;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

/*
 * Transmit of LS RSP done (e.g. buffers all set). call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

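/*
 * LS request path: the initiator's ls_req is handed straight to
 * nvmet_fc_rcv_ls_req(); the target's response returns through
 * fcloop_xmt_ls_rsp(), which copies it into the requester's buffer
 * and completes the request via the work item above.
 */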
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

/*
 * FCP IO operation done by initiator abort.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
{
	struct fcloop_ini_fcpreq *inireq =
		container_of(work, struct fcloop_ini_fcpreq, iniwork);

	inireq->fcpreq->done(inireq->fcpreq);
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}

	kfree(tfcp_req);
}

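/*
 * FCP command path: fcloop_fcp_req() allocates a per-I/O fcloop_fcpreq
 * that links the initiator request to the target side, then feeds the
 * command to nvmet_fc_rcv_fcp_req(). The two work items above post the
 * initiator-side completions (abort and normal completion paths).
 */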
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	int ret = 0;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);

	return ret;
}

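/*
 * "DMA" for the loopback: walk the target-side scatterlist (data_sg)
 * and the initiator-side scatterlist (io_sg) in lockstep, memcpy'ing
 * min-sized runs in the direction implied by op, after first skipping
 * 'offset' bytes of the initiator list.
 */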
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

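/*
 * fcloop_fcp_op() emulates the LLDD data-movement ops. The
 * active/aborted flags sampled under reqlock catch the two racy cases:
 * an op issued while one is already in flight, and an op issued after
 * the target transport has aborted the I/O.
 */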
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
}

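/*
 * nport lifetime: created with a kref by fcloop_alloc_nport(); the
 * remoteport/targetport delete callbacks below drop a reference each,
 * and the final put unlinks and frees the nport.
 */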
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

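/*
 * The *_priv_sz fields above let the host and target transports
 * allocate fcloop's per-object private structures inline with their
 * own, which is why the callbacks can simply dereference ->private.
 */
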
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

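/*
 * The module is driven entirely through the write-only attributes
 * above, exposed on the "ctl" device created in fcloop_init(). A
 * minimal sketch (the WWN values below are made up):
 *
 *	echo "wwnn=0x20000090fa945aaa,wwpn=0x10000090fa945aaa" \
 *		> /sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x20000090fa945bbb,wwpn=0x10000090fa945bbb" \
 *		> /sys/class/fcloop/ctl/add_target_port
 *	echo "wwnn=0x20000090fa945bbb,wwpn=0x10000090fa945bbb,lpwwnn=0x20000090fa945aaa,lpwwpn=0x10000090fa945aaa" \
 *		> /sys/class/fcloop/ctl/add_remote_port
 *
 * The remote port shares the target port's wwnn/wwpn, pairing the two
 * on one nport, and lpwwnn/lpwwpn name the local port to attach to.
 */
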
static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

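/*
 * Module teardown mirrors the sysfs del_* paths: unlink each nport
 * under fcloop_lock, unregister its target and remote ports with the
 * lock dropped, then do the same for the local ports, waiting for each
 * localport_delete callback to fire.
 */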
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");