/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048
struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;
struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));
/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};
struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;
	struct work_struct		done_work;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};
struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
	u32				max_sg_cnt;
};
struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};
struct nvmet_fc_tgt_queue {
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};
static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}
/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 *   in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID		sizeof(u16)
#define BYTES_FOR_QID_SHIFT	(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK	((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
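/*
 * Worked example of the ID helpers above (illustrative values, not from
 * the original source): if get_random_bytes() yielded an association_id
 * of 0x123456789abc0000 (lower 2 bytes forced to zero), then for qid 3:
 *
 *	nvmet_fc_makeconnid(assoc, 3)                  == 0x123456789abc0003
 *	nvmet_fc_getassociationid(0x123456789abc0003)  == 0x123456789abc0000
 *	nvmet_fc_getqueueid(0x123456789abc0003)        == 3
 *
 * and, per the note above, the admin queue's connection id (qid 0)
 * equals the association id itself.
 */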
static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}
/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * If simple mappings (return just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}
/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}
static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
/* *********************** FC-NVME Port Management ************************ */
static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	kfree(tgtport->iod);

	return -ENOMEM;
}
static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}
static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}
static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}
static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}
static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}
static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}
static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
		queue_work_on(queue->cpu, queue->work_q, &fod->work);
	else
		nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
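/*
 * Worked example of the hw queue mapping above (illustrative numbers):
 * with tgtport->ops->max_hw_queues == 4, nvme queue ids 1..8 map to hw
 * queue ids 0,1,2,3,0,1,2,3, while the admin queue (qid 0) always maps
 * to hw queue id 0.
 */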
static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}
static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen  = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}
static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}
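/*
 * Worked example of the cpu selection above (illustrative numbers):
 * with 6 active cpus, io queue qid 7 gives idx = (7 - 1) % 6 = 0, so
 * the first active cpu is chosen; the admin queue (qid 0) also selects
 * the first active cpu.
 */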
static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
			(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
			GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}
static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}
static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}
static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i, writedataactive;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* about outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			writedataactive = fod->writedataactive;
			spin_unlock(&fod->flock);
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (writedataactive) {
				spin_lock(&fod->flock);
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			}
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}
static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}
static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}
static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}
static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}
/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
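/*
 * Illustrative usage sketch (not part of the driver): how an LLDD might
 * register a target port at probe time. All "example_" names are
 * hypothetical; a real LLDD supplies its own template filled with its
 * hardware limits and callbacks, and keeps the returned
 * nvmet_fc_target_port pointer for later unregistration.
 *
 *	static struct nvmet_fc_target_template example_tgt_template = {
 *		.targetport_delete	= example_targetport_delete,
 *		.xmt_ls_rsp		= example_xmt_ls_rsp,
 *		.fcp_op			= example_fcp_op,
 *		.fcp_abort		= example_fcp_abort,
 *		.fcp_req_release	= example_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_priv_sz		= sizeof(struct example_tgt_priv),
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= wwnn,
 *		.port_name	= wwpn,
 *		.port_id	= did,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int err;
 *
 *	err = nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
 *					   &pdev->dev, &targetport);
 *	if (err)
 *		return err;
 */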
static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}
static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}
static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}
/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			schedule_work(&assoc->del_work);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
/* *********************** FC-NVME LS Handling **************************** */
static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}
static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}
/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};
static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};
static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	/*
	 * FC-NVME spec changes. There are initiators sending different
	 * lengths as padding sizes for Create Association Cmd descriptor
	 * list.
	 * Accept anything of "minimum" length. Assume format per 1.15
	 * spec (with HOSTID reduced to 16 bytes), ignore how long the
	 * trailing pad length is.
	 */
	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
		ret = VERR_CR_ASSOC_LEN;
	else if (be32_to_cpu(rqst->desc_list_len) <
			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue) {
				ret = VERR_QUEUE_ALLOC_FAIL;
				nvmet_fc_tgt_a_put(iod->assoc);
			}
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}
static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}
static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue = NULL;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;
	bool del_assoc = false;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (assoc) {
			if (rqst->discon_cmd.scope ==
					FCNVME_DISCONN_CONNECTION) {
				queue = nvmet_fc_find_target_queue(tgtport,
						be64_to_cpu(
							rqst->discon_cmd.id));
				if (!queue) {
					nvmet_fc_tgt_a_put(assoc);
					ret = VERR_NO_CONN;
				}
			}
		} else
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					(ret == VERR_NO_CONN) ?
						FCNVME_RJT_RC_INV_CONN :
						FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);

	/* are we to delete a Connection ID (queue) */
	if (queue) {
		int qid = queue->qid;

		nvmet_fc_delete_target_queue(queue);

		/* release the get taken by find_target_queue */
		nvmet_fc_tgt_q_put(queue);

		/* tear association down if io queue terminated */
		if (!qid)
			del_assoc = true;
	}

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	if (del_assoc)
		nvmet_fc_delete_target_assoc(iod->assoc);
}
/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}
static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}
/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* be preventive: handlers will later set a valid length */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}
/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}
/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @lsreq:       pointer to a lsreq request structure to be used to reference
 *               the exchange corresponding to the LS.
 * @lsreqbuf:    pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
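/*
 * Illustrative usage sketch (not part of the driver): an LLDD's LS
 * receive path might look like the following. The "example_" names are
 * hypothetical; the key points are that the payload buffer may be
 * recycled as soon as the call returns, and a non-zero return means
 * the LLDD should abort the exchange.
 *
 *	static void example_lldd_recv_ls(struct example_hw *hw,
 *					 void *buf, u32 len)
 *	{
 *		struct nvmefc_tgt_ls_req *lsreq = example_alloc_lsreq(hw);
 *		int err;
 *
 *		err = nvmet_fc_rcv_ls_req(hw->targetport, lsreq, buf, len);
 *		if (err)
 *			example_abort_exchange(hw, lsreq);
 *		// buf may be reused here regardless of err
 *	}
 */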
/*
 * **********************
 * Start of FCP handling
 * **********************
 */
static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	unsigned int nent;

	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
	if (!sg)
		goto out;

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */
	fod->next_sg = fod->data_sg;

	return 0;

out:
	return NVME_SC_INTERNAL;
}
static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	sgl_free(fod->data_sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
}
static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* egad, this is ugly. And sqtail is just a best guess */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
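/*
 * Worked example for the check above (illustrative numbers only): with
 * q->sqsize = 32, sqhd = 4 and a sampled sqtail of 2, the queue wraps,
 * so used = 2 + 32 - 4 = 30 entries; 30 * 10 = 300 is >= (32 - 1) * 9
 * = 279, so the queue is treated as 90% full and an ersp is forced to
 * resync the host's view of SQHD.
 */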
/*
 * Prep RSP payload.
 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->req.transfer_len;
	else
		xfr_length = fod->offset;

	/*
	 * check to see if we can send a 0's rsp.
	 *   Note: to send a 0's response, the NVME-FC host transport will
	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
	 *   seen in an ersp), and command_id. Thus it will create a
	 *   zero-filled CQE with those known fields filled in. Transport
	 *   must send an ersp for any condition where the cqe won't match
	 *   this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *     but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    sqe->opcode == nvme_fabrics_command ||
	    xfr_length != fod->req.transfer_len ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
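/*
 * Worked example (illustrative): with queue->ersp_ratio == 8, every
 * 8th response on the queue is sent as a full ersp even when the CQE
 * would be reconstructible by the host; the other responses go out as
 * NVME_FC_SIZEOF_ZEROS_RSP-byte zero payloads unless one of the other
 * conditions above (non-zero status, fused cmd, short transfer, fabric
 * command, 90%-full SQ) forces an ersp.
 */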
static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	/*
	 * if an ABTS was received or we issued the fcp_abort early
	 * don't call abort routine again.
	 */
	/* no need to take lock - lock was taken earlier to get here */
	if (!fod->aborted)
		tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);

	nvmet_fc_free_fcp_iod(fod->queue, fod);
}
static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	int ret;

	fod->fcpreq->op = NVMET_FCOP_RSP;
	fod->fcpreq->timeout = 0;

	nvmet_fc_prep_fcp_rsp(tgtport, fod);

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret)
		nvmet_fc_abort_op(tgtport, fod);
}
static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct scatterlist *sg = fod->next_sg;
	unsigned long flags;
	u32 remaininglen = fod->req.transfer_len - fod->offset;
	u32 tlen = 0;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

	/*
	 * for next sequence:
	 *  break at a sg element boundary
	 *  attempt to keep sequence length capped at
	 *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
	 *    be longer if a single sg element is larger
	 *    than that amount. This is done to avoid creating
	 *    a new sg list to use for the tgtport api.
	 */
	fcpreq->sg = sg;
	fcpreq->sg_cnt = 0;
	while (tlen < remaininglen &&
	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
		fcpreq->sg_cnt++;
		tlen += sg_dma_len(sg);
		sg = sg_next(sg);
	}
	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
		fcpreq->sg_cnt++;
		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
		sg = sg_next(sg);
	}
	if (tlen < remaininglen)
		fod->next_sg = sg;
	else
		fod->next_sg = NULL;

	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as its in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = false;
			spin_unlock_irqrestore(&fod->flock, flags);
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
		} else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}
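/*
 * Worked example of the sequence carving above (illustrative numbers,
 * assuming max_sg_cnt does not limit first): with 8KB dma-mapped sg
 * elements and NVMET_FC_MAX_SEQ_LENGTH of 256KB, the while loop stops
 * after 31 elements (tlen = 248KB) because adding a 32nd would reach
 * 256KB; fod->next_sg then remembers where the next chunk resumes. If
 * a single sg element is itself larger than 256KB, the second if sends
 * it alone as one oversized sequence rather than splitting the sg list.
 */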
static inline bool
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	/* if in the middle of an io and we need to tear down */
	if (abort) {
		if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return true;
		}

		nvmet_fc_abort_op(tgtport, fod);
		return true;
	}

	return false;
}
/*
 * actual done handler for FCP operations when completed by the lldd
 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	fod->writedataactive = false;
	spin_unlock_irqrestore(&fod->flock, flags);

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->abort = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			spin_lock_irqsave(&fod->flock, flags);
			fod->writedataactive = true;
			spin_unlock_irqrestore(&fod->flock, flags);

			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */
		nvmet_req_execute(&fod->req);
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_fc_abort_op(tgtport, fod);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->req.transfer_len) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
		if (__nvmet_fc_fod_op_abort(fod, abort))
			return;
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		break;
	}
}
static void
nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, done_work);

	nvmet_fc_fod_op_done(fod);
}
static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue = fod->queue;

	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
		/* context switch so completion is not in ISR context */
		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
	else
		nvmet_fc_fod_op_done(fod);
}
/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		nvmet_fc_abort_op(tgtport, fod);
		return;
	}

	/* if an error handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {

		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}
static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}
/*
 * Actual processing routine for received FC-NVME FCP CMD IUs from the LLD
 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	u32 xfrlen = be32_to_cpu(cmdiu->data_len);
	int ret;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both based on csn.
	 */

	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (xfrlen)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.rsp = &fod->rspiubuf.cqe;
	fod->req.port = fod->queue->port;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {
		/* bad SQE content or invalid ctrl state */
		/* nvmet layer has already called op done to send rsp. */
		return;
	}

	fod->req.transfer_len = xfrlen;

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	if (fod->req.transfer_len) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;

	fod->offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */
	nvmet_req_execute(&fod->req);
	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod);
}
/*
 * Actual processing routine for received FC-NVME FCP CMD IUs from the LLD
 */
static void
nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, work);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, in some circumstances, due to the packetized nature of FC
 * and the api of the FC LLDD which may issue a hw command to send the
 * response, but the LLDD may not get the hw completion for that command
 * and upcall the nvmet_fc layer before a new command may be
 * asynchronously received - it's possible for a command to be received
 * before the LLDD and nvmet_fc have recycled the job structure. It gives
 * the appearance of more commands received than fits in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated,
 * the LLDD request and CMD iu buffer information remembered, and the
 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
 * structure is freed, it is immediately reallocated for anything on the
 * pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to reference
 *               the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu) / 4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	spin_lock_irqsave(&queue->qlock, flags);

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (fod) {
		spin_unlock_irqrestore(&queue->qlock, flags);

		fcpreq->nvmet_fc_private = fod;
		fod->fcpreq = fcpreq;

		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);

		return 0;
	}

	if (!tgtport->ops->defer_rcv) {
		spin_unlock_irqrestore(&queue->qlock, flags);
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
			struct nvmet_fc_defer_fcp_req, req_list);
	if (deferfcp) {
		/* Just re-use one that was previously allocated */
		list_del(&deferfcp->req_list);
	} else {
		spin_unlock_irqrestore(&queue->qlock, flags);

		/* Now we need to dynamically allocate one */
		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
		if (!deferfcp) {
			/* release the queue lookup reference */
			nvmet_fc_tgt_q_put(queue);
			return -ENOMEM;
		}
		spin_lock_irqsave(&queue->qlock, flags);
	}

	/* For now, use rspaddr / rsplen to save payload information */
	fcpreq->rspaddr = cmdiubuf;
	fcpreq->rsplen = cmdiubuf_len;
	deferfcp->fcp_req = fcpreq;

	/* defer processing till a fod becomes available */
	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);

	/* NOTE: the queue lookup reference is still valid */

	spin_unlock_irqrestore(&queue->qlock, flags);

	return -EOVERFLOW;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
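/*
 * Illustrative sketch (not part of the original file): how an LLDD might
 * feed a received FCP CMD IU into nvmet_fc_rcv_fcp_req() and honor the
 * -EOVERFLOW/defer_rcv() contract described above. The example_lport and
 * example_cmd structures and the example_* helpers are hypothetical
 * driver-side names, not nvmet-fc API.
 */
static void
example_lldd_handle_fcp_cmd(struct example_lport *lport,
			struct example_cmd *cmd)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(lport->targetport, &cmd->tgt_fcp_req,
				cmd->cmdiu, cmd->cmdiu_len);
	switch (ret) {
	case 0:
		/* transport copied the CMD IU; the buffer may be reused now */
		example_release_cmdiu_buffer(lport, cmd);
		break;
	case -EOVERFLOW:
		/*
		 * accepted but deferred: the CMD IU buffer must stay intact
		 * until the transport calls our defer_rcv() callback for
		 * this tgt_fcp_req
		 */
		cmd->cmdiu_owned_by_transport = true;
		break;
	default:
		/* not accepted - terminate the exchange (e.g. send ABTS) */
		example_abort_exchange(lport, cmd);
		break;
	}
}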
/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                          upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the LLDD should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the LLDD's fcp_req_release() callback to
 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
 * for the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to the fcpreq request structure that corresponds
 *               to the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;

	if (!fod || fod->fcpreq != fcpreq)
		/* job appears to have already completed, ignore abort */
		return;

	queue = fod->queue;

	spin_lock_irqsave(&queue->qlock, flags);
	if (fod->active) {
		/*
		 * mark as abort. The abort handler, invoked upon completion
		 * of any work, will detect the aborted status and do the
		 * callback.
		 */
		spin_lock(&fod->flock);
		fod->abort = true;
		fod->aborted = true;
		spin_unlock(&fod->flock);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
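/*
 * Illustrative sketch (not part of the original file): the ABTS path in a
 * hypothetical LLDD. Per the contract above, the LLDD notifies the
 * transport and may defer the BA_ACC until the exchange context comes
 * back via its fcp_req_release() template op. example_* names are
 * assumptions, not nvmet-fc API.
 */
static void
example_lldd_handle_abts(struct example_lport *lport, struct example_cmd *cmd)
{
	/* tell nvmet-fc to stop working on the exchange */
	nvmet_fc_rcv_fcp_abort(lport->targetport, &cmd->tgt_fcp_req);

	/*
	 * do not send the BA_ACC yet if op work may still be outstanding;
	 * the fcp_req_release() callback is the safe point
	 */
	cmd->ba_acc_pending = true;
}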
struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;
	return 0;
}
/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build the string to parse with a 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
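/*
 * Illustrative sketch (not part of the original file): exercising
 * nvme_fc_parse_traddr() with a sample long-form traddr. The WWN values
 * are made up; both the "nn-0x...:pn-0x..." form and the prefix-less
 * "nn-...:pn-..." form are accepted, per the validation above.
 */
static void __maybe_unused
example_parse_traddr(void)
{
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	char buf[] = "nn-0x10000090fa942779:pn-0x20000090fa942779";

	if (!nvme_fc_parse_traddr(&traddr, buf, sizeof(buf)))
		pr_info("nn=0x%llx pn=0x%llx\n", traddr.nn, traddr.pn);
}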
static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */
	ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
			sizeof(port->disc_addr.traddr));
	if (ret)
		return ret;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			tgtport->port = port;
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
	return ret;
}
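/*
 * Usage note (illustrative, not part of the original file): this add_port
 * path runs when an FC nvmet port is created via configfs. A typical
 * setup writes a traddr in the format parsed above, with WWNs matching a
 * registered targetport's node_name/port_name, e.g. (example values):
 *
 *   cd /sys/kernel/config/nvmet/ports/1
 *   echo fc > addr_trtype
 *   echo fc > addr_adrfam
 *   echo "nn-0x10000090fa942779:pn-0x20000090fa942779" > addr_traddr
 */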
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	/* nothing to do */
}
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,	/* max SGL data block descriptors */
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
};
static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");