/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

/**
 * lpfc_nvme_create_queue - Create and bind an NVME IO queue handle
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx, u16 qsize,
                       void **handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nvme_qhandle *qhandle;
        char *str;

        if (!pnvme_lport->private)
                return -ENOMEM;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

        if (!vport || vport->load_flag & FC_UNLOADING ||
            vport->phba->hba_flag & HBA_IOQ_FLUSH)
                return -ENODEV;

        qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
        if (qhandle == NULL)
                return -ENOMEM;

        qhandle->cpu_id = raw_smp_processor_id();
        qhandle->qidx = qidx;
        /*
         * NVME qidx == 0 is the admin queue, so both admin queue
         * and first IO queue will use MSI-X vector and associated
         * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
         */
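        /*
         * Example: with max_hw_queues == 4, IO qidx 1..4 map to
         * hdw_queue index 0..3 and qidx 5 wraps back to index 0.
         */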
        if (qidx) {
                str = "IO ";  /* IO queue */
                qhandle->index = ((qidx - 1) %
                        lpfc_nvme_template.max_hw_queues);
        } else {
                str = "ADM";  /* Admin queue */
                qhandle->index = qidx;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6073 Binding %s HdwQueue %d  (cpu %d) to "
                         "hdw_queue %d qhandle x%px\n", str,
                         qidx, qhandle->cpu_id, qhandle->index, qhandle);
        *handle = (void *)qhandle;
        return 0;
}

/**
 * lpfc_nvme_delete_queue - Delete an NVME IO queue handle
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free any internal data structures
 * bound to @qidx by lpfc_nvme_create_queue.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx,
                       void *handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;

        if (!pnvme_lport->private)
                return;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                        "6001 ENTER.  lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
                        lport, qidx, handle);
        kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
        struct lpfc_nvme_lport *lport = localport->private;

        lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
                         "6173 localport x%px delete complete\n",
                         lport);

        /* release any threads waiting for the unreg to complete */
        if (lport->vport->localport)
                complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct lpfc_nvme_rport *rport = remoteport->private;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        u32 fc4_xpt_flags;

        ndlp = rport->ndlp;
        if (!ndlp) {
                pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
                       __func__, rport, remoteport);
                goto rport_err;
        }

        vport = ndlp->vport;
        if (!vport) {
                pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
                       __func__, ndlp, ndlp->nlp_state, rport);
                goto rport_err;
        }

        fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

        /* Remove this rport from the lport's list - memory is owned by the
         * transport. Remove the ndlp reference for the NVME transport before
         * calling state machine to remove the node.
         */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6146 remoteport delete of remoteport x%px, ndlp x%px "
                         "DID x%x xflags x%x\n",
                         remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
        spin_lock_irq(&ndlp->lock);

        /* The register rebind might have occurred before the delete
         * downcall.  Guard against this race.
         */
        if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
                ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);

        spin_unlock_irq(&ndlp->lock);

        /* On a devloss timeout event, one more put is executed provided the
         * NVME and SCSI rport unregister requests are complete.  If the vport
         * is unloading, this extra put is executed by lpfc_drop_node.
         */
        if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
        return;
}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if the LS was handled and delivered to the transport
 * Returns non-zero if the LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
                        struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
        struct lpfc_vport *vport;
        struct lpfc_nvme_rport *lpfc_rport;
        struct nvme_fc_remote_port *remoteport;
        struct lpfc_nvme_lport *lport;
        uint32_t *payload = axchg->payload;
        int rc;

        vport = axchg->ndlp->vport;
        lpfc_rport = axchg->ndlp->nrport;
        if (!lpfc_rport)
                return -EINVAL;

        remoteport = lpfc_rport->remoteport;
        if (!vport->localport ||
            vport->phba->hba_flag & HBA_IOQ_FLUSH)
                return -EINVAL;

        lport = vport->localport->private;
        if (!lport)
                return -EINVAL;

        rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
                                axchg->size);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
                        "%08x %08x %08x\n",
                        axchg->size, rc,
                        *payload, *(payload+1), *(payload+2),
                        *(payload+3), *(payload+4), *(payload+5));

        if (!rc)
                return 0;
#endif
        return 1;
}

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME
 *        LS request.
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
                        struct lpfc_iocbq *cmdwqe,
                        struct lpfc_wcqe_complete *wcqe)
{
        struct nvmefc_ls_req *pnvme_lsreq;
        struct lpfc_dmabuf *buf_ptr;
        struct lpfc_nodelist *ndlp;
        uint32_t status;

        pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
        ndlp = cmdwqe->ndlp;
        buf_ptr = cmdwqe->bpl_dmabuf;

        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
                         "status %x reason x%x cmd:x%px lsreq:x%px bmp:x%px "
                         "ndlp:x%px\n",
                         pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                         cmdwqe->sli4_xritag, status,
                         (wcqe->parameter & 0xffff),
                         cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
                         ndlp);

        lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
                         cmdwqe->sli4_xritag, status, wcqe->parameter);

        if (buf_ptr) {
                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                kfree(buf_ptr);
                cmdwqe->bpl_dmabuf = NULL;
        }
        if (pnvme_lsreq->done)
                pnvme_lsreq->done(pnvme_lsreq, status);
        else
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6046 NVMEx cmpl without done call back? "
                                 "Data x%px DID %x Xri: %x status %x\n",
                                pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                                cmdwqe->sli4_xritag, status);
        if (ndlp) {
                lpfc_nlp_put(ndlp);
                cmdwqe->ndlp = NULL;
        }
        lpfc_sli_release_iocbq(phba, cmdwqe);
}

static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                     struct lpfc_iocbq *rspwqe)
{
        struct lpfc_vport *vport = cmdwqe->vport;
        struct lpfc_nvme_lport *lport;
        uint32_t status;
        struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;

        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

        if (vport->localport) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (lport) {
                        atomic_inc(&lport->fc4NvmeLsCmpls);
                        if (status) {
                                if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                        atomic_inc(&lport->cmpl_ls_xb);
                                atomic_inc(&lport->cmpl_ls_err);
                        }
                }
        }

        __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
                  struct lpfc_dmabuf *inp,
                  struct nvmefc_ls_req *pnvme_lsreq,
                  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
                               struct lpfc_iocbq *),
                  struct lpfc_nodelist *ndlp, uint32_t num_entry,
                  uint32_t tmo, uint8_t retry)
{
        struct lpfc_hba *phba = vport->phba;
        union lpfc_wqe128 *wqe;
        struct lpfc_iocbq *genwqe;
        struct ulp_bde64 *bpl;
        struct ulp_bde64 bde;
        int i, rc, xmit_len, first_len;

        /* Allocate buffer for command WQE */
        genwqe = lpfc_sli_get_iocbq(phba);
        if (genwqe == NULL)
                return 1;

        wqe = &genwqe->wqe;
        /* Initialize only 64 bytes */
        memset(wqe, 0, sizeof(union lpfc_wqe));

        genwqe->bpl_dmabuf = bmp;
        genwqe->cmd_flag |= LPFC_IO_NVME_LS;

        /* Save for completion so we can release these resources */
        genwqe->ndlp = lpfc_nlp_get(ndlp);
        if (!genwqe->ndlp) {
                dev_warn(&phba->pcidev->dev,
                         "Warning: Failed node ref, not sending LS_REQ\n");
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }

        genwqe->context_un.nvme_lsreq = pnvme_lsreq;
        /* Fill in payload, bp points to frame payload */

        if (!tmo)
                /* FC spec states we need 3 * ratov for CT requests */
                tmo = (3 * phba->fc_ratov);
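                /* e.g. with a typical R_A_TOV of 10 seconds, tmo becomes 30s */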

        /* For this command calculate the xmit length of the request bde. */
        xmit_len = 0;
        first_len = 0;
        bpl = (struct ulp_bde64 *)bmp->virt;
        for (i = 0; i < num_entry; i++) {
                bde.tus.w = bpl[i].tus.w;
                if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
                        break;
                xmit_len += bde.tus.f.bdeSize;
                if (i == 0)
                        first_len = xmit_len;
        }
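        /*
         * For the two-entry BPL built by __lpfc_nvme_ls_req, only the
         * request BDE (BUFF_TYPE_BDE_64) is counted; the response BDE
         * (BUFF_TYPE_BDE_64I) ends the scan, so xmit_len == first_len.
         */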

        genwqe->num_bdes = num_entry;
        genwqe->hba_wqidx = 0;

        /* Words 0 - 2 */
        wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        wqe->generic.bde.tus.f.bdeSize = first_len;
        wqe->generic.bde.addrLow = bpl[0].addrLow;
        wqe->generic.bde.addrHigh = bpl[0].addrHigh;

        /* Word 3 */
        wqe->gen_req.request_payload_len = first_len;

        /* Word 4 */

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
        bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
        bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
        bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
        bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
        bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

        /* Word 8 */
        wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


        /* Issue GEN REQ WQE for NPORT <did> */
        genwqe->cmd_cmpl = cmpl;
        genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
        genwqe->vport = vport;
        genwqe->retry = retry;

        lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
                         genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

        rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
        if (rc) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6045 Issue GEN REQ WQE to NPORT x%x "
                                 "Data: x%x x%x  rc x%x\n",
                                 ndlp->nlp_DID, genwqe->iotag,
                                 vport->port_state, rc);
                lpfc_nlp_put(ndlp);
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
                         "6050 Issue GEN REQ WQE to NPORT x%x "
                         "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
                         "bmp:x%px xmit:%d 1st:%d\n",
                         ndlp->nlp_DID, genwqe->sli4_xritag,
                         vport->port_state,
                         genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
        return 0;
}


/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                      struct nvmefc_ls_req *pnvme_lsreq,
                      void (*gen_req_cmp)(struct lpfc_hba *phba,
                                struct lpfc_iocbq *cmdwqe,
                                struct lpfc_iocbq *rspwqe))
{
        struct lpfc_dmabuf *bmp;
        struct ulp_bde64 *bpl;
        int ret;
        uint16_t ntype, nstate;

        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
                                 "LS Req\n",
                                 ndlp);
                return -ENODEV;
        }

        ntype = ndlp->nlp_type;
        nstate = ndlp->nlp_state;
        if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
            (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6088 NVMEx LS REQ: Fail DID x%06x not "
                                 "ready for IO. Type x%x, State x%x\n",
                                 ndlp->nlp_DID, ntype, nstate);
                return -ENODEV;
        }
        if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
                return -ENODEV;

        if (!vport->phba->sli4_hba.nvmels_wq)
                return -ENOMEM;

        /*
         * There are two DMA buffers in the request; in reality there is
         * one and the second is just the start address + cmd size.
         * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
         * in a lpfc_dmabuf struct. When freeing we just free the wrapper
         * because the nvme layer owns the data bufs.
         * We do not have to break these packets open, we don't care what is
         * in them. And we do not have to look at the response data, we only
         * care that we got a response. All of the caring is going to happen
         * in the nvme-fc layer.
         */

        bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
        if (!bmp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6044 NVMEx LS REQ: Could not alloc LS buf "
                                 "for DID %x\n",
                                 ndlp->nlp_DID);
                return -ENOMEM;
        }

        bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
        if (!bmp->virt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6042 NVMEx LS REQ: Could not alloc mbuf "
                                 "for DID %x\n",
                                 ndlp->nlp_DID);
                kfree(bmp);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&bmp->list);

        bpl = (struct ulp_bde64 *)bmp->virt;
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
        bpl->tus.f.bdeFlags = 0;
        bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
        bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
                        "rqstlen:%d rsplen:%d %pad %pad\n",
                        ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
                        pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                        &pnvme_lsreq->rspdma);

        ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
                                pnvme_lsreq, gen_req_cmp, ndlp, 2,
                                pnvme_lsreq->timeout, 0);
        if (ret != WQE_SUCCESS) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
                                 "lsreq x%px Status %x DID %x\n",
                                 pnvme_lsreq, ret, ndlp->nlp_DID);
                lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
                kfree(bmp);
                return -EIO;
        }

        return 0;
}

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
                 struct nvme_fc_remote_port *pnvme_rport,
                 struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_rport *rport;
        struct lpfc_vport *vport;
        int ret;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
        if (unlikely(!lport) || unlikely(!rport))
                return -EINVAL;

        vport = lport->vport;
        if (vport->load_flag & FC_UNLOADING ||
            vport->phba->hba_flag & HBA_IOQ_FLUSH)
                return -ENODEV;

        atomic_inc(&lport->fc4NvmeLsRequests);

        ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
                                 lpfc_nvme_ls_req_cmp);
        if (ret)
                atomic_inc(&lport->xmt_ls_err);

        return ret;
}

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 * 0 : if LS found and aborted
 * non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *wqe, *next_wqe;
        bool foundit = false;

        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
                                "x%06x, Failing LS Req\n",
                                ndlp, ndlp ? ndlp->nlp_DID : 0);
                return -EINVAL;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
                         "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
                         "x%px rqstlen:%d rsplen:%d %pad %pad\n",
                         pnvme_lsreq, pnvme_lsreq->rqstlen,
                         pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                         &pnvme_lsreq->rspdma);

        /*
         * Lock the ELS ring txcmplq and look for the wqe that matches
         * this ELS. If found, issue an abort on the wqe.
         */
        pring = phba->sli4_hba.nvmels_wq->pring;
        spin_lock_irq(&phba->hbalock);
        spin_lock(&pring->ring_lock);
        list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
                if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
                        wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
                        foundit = true;
                        break;
                }
        }
        spin_unlock(&pring->ring_lock);

        if (foundit)
                lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
        spin_unlock_irq(&phba->hbalock);

        if (foundit)
                return 0;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
                         "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
                         pnvme_lsreq);
        return -EINVAL;
}

static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
                     struct nvme_fc_remote_port *remoteport,
                     struct nvmefc_ls_rsp *ls_rsp)
{
        struct lpfc_async_xchg_ctx *axchg =
                container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
        struct lpfc_nvme_lport *lport;
        int rc;

        if (axchg->phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;

        lport = (struct lpfc_nvme_lport *)localport->private;

        rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

        if (rc) {
                /*
                 * unless the failure is due to having already sent
                 * the response, an abort will be generated for the
                 * exchange if the rsp can't be sent.
                 */
                if (rc != -EALREADY)
                        atomic_inc(&lport->xmt_ls_abort);
                return rc;
        }

        return 0;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
                   struct nvme_fc_remote_port *pnvme_rport,
                   struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        int ret;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        if (unlikely(!lport))
                return;
        vport = lport->vport;

        if (vport->load_flag & FC_UNLOADING)
                return;

        ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

        ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
        if (!ret)
                atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
                       struct lpfc_io_buf *lpfc_ncmd,
                       struct nvmefc_fcp_req *nCmd)
{
        struct lpfc_hba  *phba = vport->phba;
        struct sli4_sge *sgl;
        union lpfc_wqe128 *wqe;
        uint32_t *wptr, *dptr;

        /*
         * Get a local pointer to the built-in wqe and correct
         * the cmd size to match NVME's 96 bytes and fix
         * the dma address.
         */

        wqe = &lpfc_ncmd->cur_iocbq.wqe;

        /*
         * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
         * match NVME.  NVME sends 96 bytes. Also, use the NVME
         * command's command and response DMA addresses rather
         * than the virtual memory to ease the restore
         * operation.
         */
        sgl = lpfc_ncmd->dma_sgl;
        sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
        if (phba->cfg_nvme_embed_cmd) {
                sgl->addr_hi = 0;
                sgl->addr_lo = 0;

                /* Word 0-2 - NVME CMND IU (embedded payload) */
                wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
                wqe->generic.bde.tus.f.bdeSize = 56;
                wqe->generic.bde.addrHigh = 0;
                wqe->generic.bde.addrLow =  64;  /* Word 16 */

                /* Word 10  - dbde is 0, wqes is 1 in template */

                /*
                 * Embed the payload in the last half of the WQE
                 * WQE words 16-30 get the NVME CMD IU payload
                 *
                 * WQE words 16-19 get payload Words 1-4
                 * WQE words 20-21 get payload Words 6-7
                 * WQE words 22-29 get payload Words 16-23
                 */
                wptr = &wqe->words[16];  /* WQE ptr */
                dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
                dptr++;                 /* Skip Word 0 in payload */

                *wptr++ = *dptr++;      /* Word 1 */
                *wptr++ = *dptr++;      /* Word 2 */
                *wptr++ = *dptr++;      /* Word 3 */
                *wptr++ = *dptr++;      /* Word 4 */
                dptr++;                 /* Skip Word 5 in payload */
                *wptr++ = *dptr++;      /* Word 6 */
                *wptr++ = *dptr++;      /* Word 7 */
                dptr += 8;              /* Skip Words 8-15 in payload */
                *wptr++ = *dptr++;      /* Word 16 */
                *wptr++ = *dptr++;      /* Word 17 */
                *wptr++ = *dptr++;      /* Word 18 */
                *wptr++ = *dptr++;      /* Word 19 */
                *wptr++ = *dptr++;      /* Word 20 */
                *wptr++ = *dptr++;      /* Word 21 */
                *wptr++ = *dptr++;      /* Word 22 */
                *wptr   = *dptr;        /* Word 23 */
        } else {
                sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
                sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

                /* Word 0-2 - NVME CMND IU Inline BDE */
                wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
                wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
                wqe->generic.bde.addrHigh = sgl->addr_hi;
                wqe->generic.bde.addrLow =  sgl->addr_lo;

                /* Word 10 */
                bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
                bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
        }

        sgl++;

        /* Setup the physical region for the FCP RSP */
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
        sgl->word2 = le32_to_cpu(sgl->word2);
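        /* The RSP SGE is marked last only when no data SGEs follow */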
        if (nCmd->sg_cnt)
                bf_set(lpfc_sli4_sge_last, sgl, 0);
        else
                bf_set(lpfc_sli4_sge_last, sgl, 1);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}


/*
 * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
 *
 * Driver's completion handler for an NVME FCP IO WQE.  The routine
 * updates statistics, rebuilds the NVME ERSP IU when required, and
 * calls the transport's done() routine for the request.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                      struct lpfc_iocbq *pwqeOut)
{
        struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
        struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
        struct lpfc_vport *vport = pwqeIn->vport;
        struct nvmefc_fcp_req *nCmd;
        struct nvme_fc_ersp_iu *ep;
        struct nvme_fc_cmd_iu *cp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct lpfc_nvme_lport *lport;
        uint32_t code, status, idx;
        uint16_t cid, sqhd, data;
        uint32_t *ptr;
        uint32_t lat;
        bool call_done = false;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        int cpu;
#endif
        int offline = 0;

        /* Sanity check on return of outstanding command */
        if (!lpfc_ncmd) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6071 Null lpfc_ncmd pointer. No "
                                 "release, skip completion\n");
                return;
        }

        /* Guard against abort handler being called at same time */
        spin_lock(&lpfc_ncmd->buf_lock);

        if (!lpfc_ncmd->nvmeCmd) {
                spin_unlock(&lpfc_ncmd->buf_lock);
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
                                 "nvmeCmd x%px\n",
                                 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

                /* Release the lpfc_ncmd regardless of the missing elements. */
                lpfc_release_nvme_buf(phba, lpfc_ncmd);
                return;
        }
        nCmd = lpfc_ncmd->nvmeCmd;
        status = bf_get(lpfc_wcqe_c_status, wcqe);

        idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
        phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

        if (unlikely(status && vport->localport)) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (lport) {
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&lport->cmpl_fcp_xb);
                        atomic_inc(&lport->cmpl_fcp_err);
                }
        }

        lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                         status, wcqe->parameter);
        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.
         */
        ndlp = lpfc_ncmd->ndlp;
        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6062 Ignoring NVME cmpl.  No ndlp\n");
                goto out_err;
        }

        code = bf_get(lpfc_wcqe_c_code, wcqe);
        if (code == CQE_CODE_NVME_ERSP) {
                /* For this type of CQE, we need to rebuild the rsp */
                ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

                /*
                 * Get Command Id from cmd to plug into response. This
                 * code is not needed in the next NVME Transport drop.
                 */
                cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
                cid = cp->sqe.common.command_id;

                /*
                 * RSN is in CQE word 2
                 * SQHD is in CQE Word 3 bits 15:0
                 * Cmd Specific info is in CQE Word 1
                 * and in CQE Word 0 bits 15:0
                 */
                sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

                /* Now let's build the NVME ERSP IU */
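                /* iu_len is in 32-bit words: 8 words == 32 bytes (LPFC_NVME_ERSP_LEN) */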
                ep->iu_len = cpu_to_be16(8);
                ep->rsn = wcqe->parameter;
                ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
                ep->rsvd12 = 0;
                ptr = (uint32_t *)&ep->cqe.result.u64;
                *ptr++ = wcqe->total_data_placed;
                data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
                *ptr = (uint32_t)data;
                ep->cqe.sq_head = sqhd;
                ep->cqe.sq_id =  nCmd->sqid;
                ep->cqe.command_id = cid;
                ep->cqe.status = 0;

                lpfc_ncmd->status = IOSTAT_SUCCESS;
                lpfc_ncmd->result = 0;
                nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
                nCmd->transferred_length = nCmd->payload_length;
        } else {
                lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
                lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

                /* For NVME, the only failure path that results in an
                 * IO error is when the adapter rejects it.  All other
                 * conditions are a success case and resolved by the
                 * transport.
                 * IOSTAT_FCP_RSP_ERROR means:
                 * 1. Length of data received doesn't match total
                 *    transfer length in WQE
                 * 2. If the RSP payload does NOT match these cases:
                 *    a. RSP length 12/24 bytes and all zeros
                 *    b. NVME ERSP
                 */
                switch (lpfc_ncmd->status) {
                case IOSTAT_SUCCESS:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = 0;
                        break;
                case IOSTAT_FCP_RSP_ERROR:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = wcqe->parameter;
                        nCmd->status = 0;

                        /* Get the NVME cmd details for this unique error. */
                        cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
                        ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

                        /* Check if this is really an ERSP */
                        if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
                                lpfc_ncmd->status = IOSTAT_SUCCESS;
                                lpfc_ncmd->result = 0;

                                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                                        "6084 NVME FCP_ERR ERSP: "
                                        "xri %x placed x%x opcode x%x cmd_id "
                                        "x%x cqe_status x%x\n",
                                        lpfc_ncmd->cur_iocbq.sli4_xritag,
                                        wcqe->total_data_placed,
                                        cp->sqe.common.opcode,
                                        cp->sqe.common.command_id,
                                        ep->cqe.status);
                                break;
                        }
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "6081 NVME Completion Protocol Error: "
                                         "xri %x status x%x result x%x "
                                         "placed x%x opcode x%x cmd_id x%x, "
                                         "cqe_status x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->total_data_placed,
                                         cp->sqe.common.opcode,
                                         cp->sqe.common.command_id,
                                         ep->cqe.status);
                        break;
                case IOSTAT_LOCAL_REJECT:
                        /* Let it fall through to set the command's final state. */
                        if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
                                lpfc_printf_vlog(vport, KERN_INFO,
                                         LOG_NVME_IOERR,
                                         "6032 Delay Aborted cmd x%px "
                                         "nvme cmd x%px, xri x%x, "
                                         "xb %d\n",
                                         lpfc_ncmd, nCmd,
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         bf_get(lpfc_wcqe_c_xb, wcqe));
                        fallthrough;
                default:
out_err:
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                         "6072 NVME Completion Error: xri %x "
                                         "status x%x result x%x [x%x] "
                                         "placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->parameter,
                                         wcqe->total_data_placed);
                        nCmd->transferred_length = 0;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = NVME_SC_INTERNAL;
                        offline = pci_channel_offline(vport->phba->pcidev);
                }
        }

        /* pick up SLI4 exchange busy condition */
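        /* XBUSY defers reuse of the buffer until the exchange is released */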
        if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
                lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
        else
                lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

        /* Update stats and complete the IO.  There is
         * no need for dma unprep because the nvme_transport
         * owns the dma address.
         */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (lpfc_ncmd->ts_cmd_start) {
                lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
                lpfc_ncmd->ts_data_io = ktime_get_ns();
                phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
                lpfc_io_ktime(phba, lpfc_ncmd);
        }
        if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
                cpu = raw_smp_processor_id();
                this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
                if (lpfc_ncmd->cpu != cpu)
                        lpfc_printf_vlog(vport,
                                         KERN_INFO, LOG_NVME_IOERR,
                                         "6701 CPU Check cmpl: "
                                         "cpu %d expect %d\n",
                                         cpu, lpfc_ncmd->cpu);
        }
#endif

        /* NVME targets need completion held off until the abort exchange
         * completes unless the NVME Rport is getting unregistered.
         */

        if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
                freqpriv = nCmd->private;
                freqpriv->nvme_buf = NULL;
                lpfc_ncmd->nvmeCmd = NULL;
                call_done = true;
        }
        spin_unlock(&lpfc_ncmd->buf_lock);

        /* Check if IO qualified for CMF */
        if (phba->cmf_active_mode != LPFC_CFG_OFF &&
            nCmd->io_dir == NVMEFC_FCP_READ &&
            nCmd->payload_length) {
                /* Used when calculating average latency */
                lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
                lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
        }

        if (call_done)
                nCmd->done(nCmd);

        /* Call release with XB=1 to queue the IO into the abort list. */
        lpfc_release_nvme_buf(phba, lpfc_ncmd);
}


/**
 * lpfc_nvme_prep_io_cmd - Prepare an NVME-over-FCP IO WQE
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * This routine initializes the WQE for an NVME FCP IO request using
 * data from the nvmefc_fcp_req attached to @lpfc_ncmd and the rport
 * node information in @pnode.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                      struct lpfc_io_buf *lpfc_ncmd,
                      struct lpfc_nodelist *pnode,
                      struct lpfc_fc4_ctrl_stat *cstat)
{
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
        struct nvme_common_command *sqe;
        struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
        union lpfc_wqe128 *wqe = &pwqeq->wqe;
        uint32_t req_len;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.
         */
        if (nCmd->sg_cnt) {
                if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
                        /* From the iwrite template, initialize words 7 - 11 */
                        memcpy(&wqe->words[7],
                               &lpfc_iwrite_cmd_template.words[7],
                               sizeof(uint32_t) * 5);

                        /* Word 4 */
                        wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

                        /* Word 5 */
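                        /*
                         * First burst: cap the initial transfer length at
                         * the smaller of the payload length and the
                         * rport's advertised first-burst size.
                         */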
                        if ((phba->cfg_nvme_enable_fb) &&
                            (pnode->nlp_flag & NLP_FIRSTBURST)) {
                                req_len = lpfc_ncmd->nvmeCmd->payload_length;
                                if (req_len < pnode->nvme_fb_size)
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                req_len;
                                else
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                pnode->nvme_fb_size;
                        } else {
                                wqe->fcp_iwrite.initial_xfer_len = 0;
                        }
                        cstat->output_requests++;
                } else {
                        /* From the iread template, initialize words 7 - 11 */
                        memcpy(&wqe->words[7],
                               &lpfc_iread_cmd_template.words[7],
                               sizeof(uint32_t) * 5);

                        /* Word 4 */
                        wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

                        /* Word 5 */
                        wqe->fcp_iread.rsrvd5 = 0;

                        /* For a CMF Managed port, iod must be zero'ed */
                        if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
                                bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
                                       LPFC_WQE_IOD_NONE);
                        cstat->input_requests++;
                }
        } else {
                /* From the icmnd template, initialize words 4 - 11 */
                memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
                       sizeof(uint32_t) * 8);
                cstat->control_requests++;
        }

        if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
                bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
                sqe = &((struct nvme_fc_cmd_iu *)
                        nCmd->cmdaddr)->sqe.common;
                if (sqe->opcode == nvme_admin_async_event)
                        bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
        }

        /*
         * Finish initializing those WQE fields that are independent
         * of the nvme_cmnd request_buffer
         */

        /* Word 3 */
        bf_set(payload_offset_len, &wqe->fcp_icmd,
               (nCmd->rsplen + nCmd->cmdlen));

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
               phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

        /* Word 8 */
        wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

        /* Word 10 */
        bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

        /* Words 13 14 15 are for PBDE support */

        /* add the VMID tags as per switch response */
        if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
                if (phba->pport->vmid_priority_tagging) {
                        bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
                        bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
                               lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
                } else {
                        bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
                        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
                        wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
                }
        }

        pwqeq->vport = vport;
        return 0;
}


/**
 * lpfc_nvme_prep_io_dma - Prepare the data DMA segments for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 *
 * This routine maps the request's scatter-gather list into the SGEs of
 * the WQE's SGL and fixes up the command and response buffer SGEs.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure
 **/
1332 static int
1333 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
1334                       struct lpfc_io_buf *lpfc_ncmd)
1335 {
1336         struct lpfc_hba *phba = vport->phba;
1337         struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1338         union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
1339         struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
1340         struct sli4_hybrid_sgl *sgl_xtra = NULL;
1341         struct scatterlist *data_sg;
1342         struct sli4_sge *first_data_sgl;
1343         struct ulp_bde64 *bde;
1344         dma_addr_t physaddr = 0;
1345         uint32_t dma_len = 0;
1346         uint32_t dma_offset = 0;
1347         int nseg, i, j;
1348         bool lsp_just_set = false;
1349
        /* Fix up the command and response DMA mappings. */
1351         lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
1352
1353         /*
1354          * There are three possibilities here - use scatter-gather segment, use
1355          * the single mapping, or neither.
1356          */
1357         if (nCmd->sg_cnt) {
1358                 /*
1359                  * Jump over the cmd and rsp SGEs.  The fix routine
1360                  * has already adjusted for this.
1361                  */
1362                 sgl += 2;
1363
1364                 first_data_sgl = sgl;
1365                 lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
1366                 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
1367                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1368                                         "6058 Too many sg segments from "
1369                                         "NVME Transport.  Max %d, "
1370                                         "nvmeIO sg_cnt %d\n",
1371                                         phba->cfg_nvme_seg_cnt + 1,
1372                                         lpfc_ncmd->seg_cnt);
1373                         lpfc_ncmd->seg_cnt = 0;
1374                         return 1;
1375                 }
1376
1377                 /*
1378                  * The driver established a maximum scatter-gather segment count
1379                  * during probe that limits the number of sg elements in any
1380                  * single nvme command.  Just run through the seg_cnt and format
                 * the SGEs.
1382                  */
1383                 nseg = nCmd->sg_cnt;
1384                 data_sg = nCmd->first_sgl;
1385
                /* j counts SGE slots consumed, starting at 2 to account
                 * for the cmd and rsp SGEs already in place.
                 */
1387                 j = 2;
1388                 for (i = 0; i < nseg; i++) {
1389                         if (data_sg == NULL) {
1390                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1391                                                 "6059 dptr err %d, nseg %d\n",
1392                                                 i, nseg);
1393                                 lpfc_ncmd->seg_cnt = 0;
1394                                 return 1;
1395                         }
1396
1397                         sgl->word2 = 0;
1398                         if (nseg == 1) {
1399                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
1400                                 bf_set(lpfc_sli4_sge_type, sgl,
1401                                        LPFC_SGE_TYPE_DATA);
1402                         } else {
1403                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
1404
                                /* About to cross the current SGL chunk
                                 * boundary: chain to an extra per-hdwq SGL
                                 * chunk via an LSP-type SGE whose address
                                 * points at the new chunk.
                                 */
1406                                 if (!lsp_just_set &&
1407                                     !((j + 1) % phba->border_sge_num) &&
1408                                     ((nseg - 1) != i)) {
1409                                         /* set LSP type */
1410                                         bf_set(lpfc_sli4_sge_type, sgl,
1411                                                LPFC_SGE_TYPE_LSP);
1412
1413                                         sgl_xtra = lpfc_get_sgl_per_hdwq(
1414                                                         phba, lpfc_ncmd);
1415
1416                                         if (unlikely(!sgl_xtra)) {
1417                                                 lpfc_ncmd->seg_cnt = 0;
1418                                                 return 1;
1419                                         }
1420                                         sgl->addr_lo = cpu_to_le32(putPaddrLow(
1421                                                        sgl_xtra->dma_phys_sgl));
1422                                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(
1423                                                        sgl_xtra->dma_phys_sgl));
1424
1425                                 } else {
1426                                         bf_set(lpfc_sli4_sge_type, sgl,
1427                                                LPFC_SGE_TYPE_DATA);
1428                                 }
1429                         }
1430
1431                         if (!(bf_get(lpfc_sli4_sge_type, sgl) &
1432                                      LPFC_SGE_TYPE_LSP)) {
1433                                 if ((nseg - 1) == i)
1434                                         bf_set(lpfc_sli4_sge_last, sgl, 1);
1435
1436                                 physaddr = sg_dma_address(data_sg);
1437                                 dma_len = sg_dma_len(data_sg);
1438                                 sgl->addr_lo = cpu_to_le32(
1439                                                          putPaddrLow(physaddr));
1440                                 sgl->addr_hi = cpu_to_le32(
1441                                                         putPaddrHigh(physaddr));
1442
1443                                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1444                                 sgl->word2 = cpu_to_le32(sgl->word2);
1445                                 sgl->sge_len = cpu_to_le32(dma_len);
1446
1447                                 dma_offset += dma_len;
1448                                 data_sg = sg_next(data_sg);
1449
1450                                 sgl++;
1451
1452                                 lsp_just_set = false;
1453                         } else {
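                                /* This SGE was consumed as the LSP chain
                                 * entry: record the chained chunk's size,
                                 * switch sgl to the new chunk, and back up
                                 * i so the current data segment is emitted
                                 * into the new chunk on the next pass.
                                 */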
1454                                 sgl->word2 = cpu_to_le32(sgl->word2);
1455
1456                                 sgl->sge_len = cpu_to_le32(
1457                                                      phba->cfg_sg_dma_buf_size);
1458
1459                                 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
1460                                 i = i - 1;
1461
1462                                 lsp_just_set = true;
1463                         }
1464
1465                         j++;
1466                 }
1467
1468                 /* PBDE support for first data SGE only */
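                /* With a single data segment, mirroring the SGE as a BDE
                 * in WQE words 13-15 is a performance hint that spares the
                 * adapter a separate fetch of the external SGL.
                 */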
1469                 if (nseg == 1 && phba->cfg_enable_pbde) {
1470                         /* Words 13-15 */
1471                         bde = (struct ulp_bde64 *)
1472                                 &wqe->words[13];
1473                         bde->addrLow = first_data_sgl->addr_lo;
1474                         bde->addrHigh = first_data_sgl->addr_hi;
1475                         bde->tus.f.bdeSize =
1476                                 le32_to_cpu(first_data_sgl->sge_len);
1477                         bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1478                         bde->tus.w = cpu_to_le32(bde->tus.w);
1479
1480                         /* Word 11 - set PBDE bit */
1481                         bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
1482                 } else {
1483                         memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1484                         /* Word 11 - PBDE bit disabled by default template */
1485                 }
1486
1487         } else {
1488                 lpfc_ncmd->seg_cnt = 0;
1489
                /* For this clause to be valid, the payload_length
                 * and sg_cnt must both be zero.
                 */
1493                 if (nCmd->payload_length != 0) {
1494                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1495                                         "6063 NVME DMA Prep Err: sg_cnt %d "
1496                                         "payload_length x%x\n",
1497                                         nCmd->sg_cnt, nCmd->payload_length);
1498                         return 1;
1499                 }
1500         }
1501         return 0;
1502 }
1503
1504 /**
1505  * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
1506  * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport to which @pnvme_fcreq is sent
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   negative errno - Failure (-EBUSY, -EINVAL, -ENODEV, or -ENOMEM)
1518  **/
1519 static int
1520 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1521                         struct nvme_fc_remote_port *pnvme_rport,
1522                         void *hw_queue_handle,
1523                         struct nvmefc_fcp_req *pnvme_fcreq)
1524 {
1525         int ret = 0;
1526         int expedite = 0;
1527         int idx, cpu;
1528         struct lpfc_nvme_lport *lport;
1529         struct lpfc_fc4_ctrl_stat *cstat;
1530         struct lpfc_vport *vport;
1531         struct lpfc_hba *phba;
1532         struct lpfc_nodelist *ndlp;
1533         struct lpfc_io_buf *lpfc_ncmd;
1534         struct lpfc_nvme_rport *rport;
1535         struct lpfc_nvme_qhandle *lpfc_queue_info;
1536         struct lpfc_nvme_fcpreq_priv *freqpriv;
1537         struct nvme_common_command *sqe;
1538         uint64_t start = 0;
1539 #if (IS_ENABLED(CONFIG_NVME_FC))
1540         u8 *uuid = NULL;
1541         int err;
1542         enum dma_data_direction iodir;
1543 #endif
1544
        /* Validate pointers. LLDD fault handling with the transport
         * has inherent timing races.
         */
1548         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1549         if (unlikely(!lport)) {
1550                 ret = -EINVAL;
1551                 goto out_fail;
1552         }
1553
1554         vport = lport->vport;
1555
1556         if (unlikely(!hw_queue_handle)) {
1557                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1558                                  "6117 Fail IO, NULL hw_queue_handle\n");
1559                 atomic_inc(&lport->xmt_fcp_err);
1560                 ret = -EBUSY;
1561                 goto out_fail;
1562         }
1563
1564         phba = vport->phba;
1565
1566         if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
1567             phba->hba_flag & HBA_IOQ_FLUSH) {
1568                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1569                                  "6124 Fail IO, Driver unload\n");
1570                 atomic_inc(&lport->xmt_fcp_err);
1571                 ret = -ENODEV;
1572                 goto out_fail;
1573         }
1574
1575         freqpriv = pnvme_fcreq->private;
1576         if (unlikely(!freqpriv)) {
1577                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1578                                  "6158 Fail IO, NULL request data\n");
1579                 atomic_inc(&lport->xmt_fcp_err);
1580                 ret = -EINVAL;
1581                 goto out_fail;
1582         }
1583
1584 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1585         if (phba->ktime_on)
1586                 start = ktime_get_ns();
1587 #endif
1588         rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1589         lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1590
1591         /*
1592          * Catch race where our node has transitioned, but the
1593          * transport is still transitioning.
1594          */
1595         ndlp = rport->ndlp;
1596         if (!ndlp) {
1597                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1598                                  "6053 Busy IO, ndlp not ready: rport x%px "
1599                                   "ndlp x%px, DID x%06x\n",
1600                                  rport, ndlp, pnvme_rport->port_id);
1601                 atomic_inc(&lport->xmt_fcp_err);
1602                 ret = -EBUSY;
1603                 goto out_fail;
1604         }
1605
1606         /* The remote node has to be a mapped target or it's an error. */
1607         if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1608             (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1609                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1610                                  "6036 Fail IO, DID x%06x not ready for "
1611                                  "IO. State x%x, Type x%x Flg x%x\n",
1612                                  pnvme_rport->port_id,
1613                                  ndlp->nlp_state, ndlp->nlp_type,
1614                                  ndlp->fc4_xpt_flags);
1615                 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1616                 ret = -EBUSY;
1617                 goto out_fail;
1618
1619         }
1620
        /* Currently only NVME Keep Alive commands should be expedited
         * if the driver runs low on resources. These are only issued
         * on the admin queue, qidx 0.
         */
1625         if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1626                 sqe = &((struct nvme_fc_cmd_iu *)
1627                         pnvme_fcreq->cmdaddr)->sqe.common;
1628                 if (sqe->opcode == nvme_admin_keep_alive)
1629                         expedite = 1;
1630         }
1631
1632         /* Check if IO qualifies for CMF */
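        /* Congestion management (CMF) accounting: only reads that move
         * data are charged against the congestion budget, and a nonzero
         * return from lpfc_update_cmf_cmd is surfaced as -EBUSY.
         */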
1633         if (phba->cmf_active_mode != LPFC_CFG_OFF &&
1634             pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
1635             pnvme_fcreq->payload_length) {
1636                 ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
1637                 if (ret) {
1638                         ret = -EBUSY;
1639                         goto out_fail;
1640                 }
1641                 /* Get start time for IO latency */
1642                 start = ktime_get_ns();
1643         }
1644
        /* The node is shared with FCP IO; make sure the IO pending count
         * does not exceed the programmed queue depth.
         */
1648         if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1649                 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1650                     !expedite) {
1651                         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1652                                          "6174 Fail IO, ndlp qdepth exceeded: "
1653                                          "idx %d DID %x pend %d qdepth %d\n",
1654                                          lpfc_queue_info->index, ndlp->nlp_DID,
1655                                          atomic_read(&ndlp->cmd_pending),
1656                                          ndlp->cmd_qdepth);
1657                         atomic_inc(&lport->xmt_fcp_qdepth);
1658                         ret = -EBUSY;
1659                         goto out_fail1;
1660                 }
1661         }
1662
        /* Look up the hardware queue index based on the fcp_io_sched module parameter */
1664         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
1665                 idx = lpfc_queue_info->index;
1666         } else {
1667                 cpu = raw_smp_processor_id();
1668                 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
1669         }
1670
1671         lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
1672         if (lpfc_ncmd == NULL) {
1673                 atomic_inc(&lport->xmt_fcp_noxri);
1674                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1675                                  "6065 Fail IO, driver buffer pool is empty: "
1676                                  "idx %d DID %x\n",
1677                                  lpfc_queue_info->index, ndlp->nlp_DID);
1678                 ret = -EBUSY;
1679                 goto out_fail1;
1680         }
1681 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1682         if (start) {
1683                 lpfc_ncmd->ts_cmd_start = start;
1684                 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1685         } else {
1686                 lpfc_ncmd->ts_cmd_start = 0;
1687         }
1688 #endif
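        /* start is nonzero only when ktime or CMF tracking is active; it
         * seeds the latency measurement for this IO.
         */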
1689         lpfc_ncmd->rx_cmd_start = start;
1690
1691         /*
1692          * Store the data needed by the driver to issue, abort, and complete
1693          * an IO.
1694          * Do not let the IO hang out forever.  There is no midlayer issuing
1695          * an abort so inform the FW of the maximum IO pending time.
1696          */
1697         freqpriv->nvme_buf = lpfc_ncmd;
1698         lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1699         lpfc_ncmd->ndlp = ndlp;
1700         lpfc_ncmd->qidx = lpfc_queue_info->qidx;
1701
1702 #if (IS_ENABLED(CONFIG_NVME_FC))
1703         /* check the necessary and sufficient condition to support VMID */
1704         if (lpfc_is_vmid_enabled(phba) &&
1705             (ndlp->vmid_support ||
1706              phba->pport->vmid_priority_tagging ==
1707              LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
                /* If the I/O was generated by a VM, get the associated
                 * virtual entity ID.
                 */
1710                 uuid = nvme_fc_io_getuuid(pnvme_fcreq);
1711
1712                 if (uuid) {
1713                         if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
1714                                 iodir = DMA_TO_DEVICE;
1715                         else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
1716                                 iodir = DMA_FROM_DEVICE;
1717                         else
1718                                 iodir = DMA_NONE;
1719
1720                         err = lpfc_vmid_get_appid(vport, uuid, iodir,
1721                                         (union lpfc_vmid_io_tag *)
1722                                                 &lpfc_ncmd->cur_iocbq.vmid_tag);
1723                         if (!err)
1724                                 lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
1725                 }
1726         }
1727 #endif
1728
        /*
         * Issue the IO on the WQ indicated by index in the hw_queue_handle.
         * This identifier was created in our hardware queue create callback
         * routine. The driver is now dependent on the IO queue steering from
         * the transport.  We trust that the upper NVME layers know which
         * index to use and that they have affinitized a CPU to this hardware
         * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
         */
1737         lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1738         cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
1739
1740         lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1741         ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1742         if (ret) {
1743                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1744                                  "6175 Fail IO, Prep DMA: "
1745                                  "idx %d DID %x\n",
1746                                  lpfc_queue_info->index, ndlp->nlp_DID);
1747                 atomic_inc(&lport->xmt_fcp_err);
1748                 ret = -ENOMEM;
1749                 goto out_free_nvme_buf;
1750         }
1751
1752         lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1753                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1754                          lpfc_queue_info->index, ndlp->nlp_DID);
1755
1756         ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
1757         if (ret) {
1758                 atomic_inc(&lport->xmt_fcp_wqerr);
1759                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1760                                  "6113 Fail IO, Could not issue WQE err %x "
1761                                  "sid: x%x did: x%x oxid: x%x\n",
1762                                  ret, vport->fc_myDID, ndlp->nlp_DID,
1763                                  lpfc_ncmd->cur_iocbq.sli4_xritag);
1764                 goto out_free_nvme_buf;
1765         }
1766
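        /* With XRI rebalancing enabled, top up this hardware queue's
         * private buffer pool so it stays above its low-water mark.
         */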
1767         if (phba->cfg_xri_rebalancing)
1768                 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1769
1770 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1771         if (lpfc_ncmd->ts_cmd_start)
1772                 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1773
1774         if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
1775                 cpu = raw_smp_processor_id();
1776                 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1777                 lpfc_ncmd->cpu = cpu;
1778                 if (idx != cpu)
1779                         lpfc_printf_vlog(vport,
1780                                          KERN_INFO, LOG_NVME_IOERR,
1781                                         "6702 CPU Check cmd: "
1782                                         "cpu %d wq %d\n",
1783                                         lpfc_ncmd->cpu,
1784                                         lpfc_queue_info->index);
1785         }
1786 #endif
1787         return 0;
1788
1789  out_free_nvme_buf:
1790         if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1791                 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1792                         cstat->output_requests--;
1793                 else
1794                         cstat->input_requests--;
1795         } else
1796                 cstat->control_requests--;
1797         lpfc_release_nvme_buf(phba, lpfc_ncmd);
1798  out_fail1:
1799         lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
1800                              pnvme_fcreq->payload_length, NULL);
1801  out_fail:
1802         return ret;
1803 }
1804
1805 /**
1806  * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1807  * @phba: Pointer to HBA context object
1808  * @cmdiocb: Pointer to command iocb object.
1809  * @rspiocb: Pointer to response iocb object.
1810  *
1811  * This is the callback function for any NVME FCP IO that was aborted.
1812  *
1813  * Return value:
1814  *   None
1815  **/
1816 void
1817 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1818                            struct lpfc_iocbq *rspiocb)
1819 {
1820         struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
1821
1822         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1823                         "6145 ABORT_XRI_CN completing on rpi x%x "
1824                         "original iotag x%x, abort cmd iotag x%x "
1825                         "req_tag x%x, status x%x, hwstatus x%x\n",
1826                         bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
1827                         get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
1828                         bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1829                         bf_get(lpfc_wcqe_c_status, abts_cmpl),
1830                         bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1831         lpfc_sli_release_iocbq(phba, cmdiocb);
1832 }
1833
1834 /**
1835  * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1836  * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport on which @pnvme_fcreq is outstanding
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler.  This
 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.  This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
1845  * ready for IO, the driver issues the abort request and returns.
1846  *
1847  * Return value:
1848  *   None
1849  **/
1850 static void
1851 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1852                     struct nvme_fc_remote_port *pnvme_rport,
1853                     void *hw_queue_handle,
1854                     struct nvmefc_fcp_req *pnvme_fcreq)
1855 {
1856         struct lpfc_nvme_lport *lport;
1857         struct lpfc_vport *vport;
1858         struct lpfc_hba *phba;
1859         struct lpfc_io_buf *lpfc_nbuf;
1860         struct lpfc_iocbq *nvmereq_wqe;
1861         struct lpfc_nvme_fcpreq_priv *freqpriv;
1862         unsigned long flags;
1863         int ret_val;
1864         struct nvme_fc_cmd_iu *cp;
1865
        /* Validate pointers. LLDD fault handling with the transport
         * has inherent timing races.
         */
1869         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1870         if (unlikely(!lport))
1871                 return;
1872
1873         vport = lport->vport;
1874
1875         if (unlikely(!hw_queue_handle)) {
1876                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1877                                  "6129 Fail Abort, HW Queue Handle NULL.\n");
1878                 return;
1879         }
1880
1881         phba = vport->phba;
1882         freqpriv = pnvme_fcreq->private;
1883
1884         if (unlikely(!freqpriv))
1885                 return;
1886         if (vport->load_flag & FC_UNLOADING)
1887                 return;
1888
        /* Announce entry to the abort request handler. */
1890         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1891                          "6002 Abort Request to rport DID x%06x "
1892                          "for nvme_fc_req x%px\n",
1893                          pnvme_rport->port_id,
1894                          pnvme_fcreq);
1895
1896         /* If the hba is getting reset, this flag is set.  It is
1897          * cleared when the reset is complete and rings reestablished.
1898          */
1899         spin_lock_irqsave(&phba->hbalock, flags);
1900         /* driver queued commands are in process of being flushed */
1901         if (phba->hba_flag & HBA_IOQ_FLUSH) {
1902                 spin_unlock_irqrestore(&phba->hbalock, flags);
1903                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1904                                  "6139 Driver in reset cleanup - flushing "
1905                                  "NVME Req now.  hba_flag x%x\n",
1906                                  phba->hba_flag);
1907                 return;
1908         }
1909
1910         lpfc_nbuf = freqpriv->nvme_buf;
1911         if (!lpfc_nbuf) {
1912                 spin_unlock_irqrestore(&phba->hbalock, flags);
1913                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1914                                  "6140 NVME IO req has no matching lpfc nvme "
1915                                  "io buffer.  Skipping abort req.\n");
1916                 return;
1917         } else if (!lpfc_nbuf->nvmeCmd) {
1918                 spin_unlock_irqrestore(&phba->hbalock, flags);
1919                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1920                                  "6141 lpfc NVME IO req has no nvme_fcreq "
1921                                  "io buffer.  Skipping abort req.\n");
1922                 return;
1923         }
1924         nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1925
1926         /* Guard against IO completion being called at same time */
1927         spin_lock(&lpfc_nbuf->buf_lock);
1928
1929         /*
1930          * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1931          * state must match the nvme_fcreq passed by the nvme
1932          * transport.  If they don't match, it is likely the driver
1933          * has already completed the NVME IO and the nvme transport
1934          * has not seen it yet.
1935          */
1936         if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1937                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1938                                  "6143 NVME req mismatch: "
1939                                  "lpfc_nbuf x%px nvmeCmd x%px, "
1940                                  "pnvme_fcreq x%px.  Skipping Abort xri x%x\n",
1941                                  lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1942                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1943                 goto out_unlock;
1944         }
1945
1946         /* Don't abort IOs no longer on the pending queue. */
1947         if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
1948                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1949                                  "6142 NVME IO req x%px not queued - skipping "
1950                                  "abort req xri x%x\n",
1951                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1952                 goto out_unlock;
1953         }
1954
1955         atomic_inc(&lport->xmt_fcp_abort);
1956         lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1957                          nvmereq_wqe->sli4_xritag,
1958                          nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1959
1960         /* Outstanding abort is in progress */
1961         if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
1962                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1963                                  "6144 Outstanding NVME I/O Abort Request "
1964                                  "still pending on nvme_fcreq x%px, "
1965                                  "lpfc_ncmd x%px xri x%x\n",
1966                                  pnvme_fcreq, lpfc_nbuf,
1967                                  nvmereq_wqe->sli4_xritag);
1968                 goto out_unlock;
1969         }
1970
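        /* Ask the SLI layer to build and post the abort WQE; the abort's
         * own completion is reported through lpfc_nvme_abort_fcreq_cmpl.
         */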
1971         ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
1972                                               lpfc_nvme_abort_fcreq_cmpl);
1973
1974         spin_unlock(&lpfc_nbuf->buf_lock);
1975         spin_unlock_irqrestore(&phba->hbalock, flags);
1976
1977         /* Make sure HBA is alive */
1978         lpfc_issue_hb_tmo(phba);
1979
1980         if (ret_val != WQE_SUCCESS) {
1981                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1982                                  "6137 Failed abts issue_wqe with status x%x "
1983                                  "for nvme_fcreq x%px.\n",
1984                                  ret_val, pnvme_fcreq);
1985                 return;
1986         }
1987
1988         /*
1989          * Get Command Id from cmd to plug into response. This
1990          * code is not needed in the next NVME Transport drop.
1991          */
1992         cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
1993         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1994                          "6138 Transport Abort NVME Request Issued for "
1995                          "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
1996                          nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
1997                          cp->sqe.common.command_id);
1998         return;
1999
2000 out_unlock:
2001         spin_unlock(&lpfc_nbuf->buf_lock);
2002         spin_unlock_irqrestore(&phba->hbalock, flags);
2003         return;
2004 }
2005
/* Declare and initialize an instance of the FC NVME template. */
2007 static struct nvme_fc_port_template lpfc_nvme_template = {
2008         /* initiator-based functions */
2009         .localport_delete  = lpfc_nvme_localport_delete,
2010         .remoteport_delete = lpfc_nvme_remoteport_delete,
2011         .create_queue = lpfc_nvme_create_queue,
2012         .delete_queue = lpfc_nvme_delete_queue,
2013         .ls_req       = lpfc_nvme_ls_req,
2014         .fcp_io       = lpfc_nvme_fcp_io_submit,
2015         .ls_abort     = lpfc_nvme_ls_abort,
2016         .fcp_abort    = lpfc_nvme_fcp_abort,
2017         .xmt_ls_rsp   = lpfc_nvme_xmt_ls_rsp,
2018
2019         .max_hw_queues = 1,
2020         .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
2021         .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
2022         .dma_boundary = 0xFFFFFFFF,
2023
        /* Sizes of additional private data for data structures.
         * lsrqst_priv_sz is unused at this time.
         */
2027         .local_priv_sz = sizeof(struct lpfc_nvme_lport),
2028         .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
2029         .lsrqst_priv_sz = 0,
2030         .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
2031 };
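/* Note: max_hw_queues and max_sgl_segments above are placeholders;
 * lpfc_nvme_create_localport() rewrites them from cfg_hdw_queue and
 * cfg_nvme_seg_cnt before registering with the transport.
 */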
2032
/*
 * lpfc_get_nvme_buf - Get an nvme buffer from the io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: The nodelist the IO is destined for; used for qdepth tracking.
 * @idx: Hardware queue index to allocate the buffer from.
 * @expedite: Allow the buffer to come from the expedite (reserved) pool.
 *
 * This routine removes an nvme buffer from the head of the @idx hardware
 * queue's io_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
2043 static struct lpfc_io_buf *
2044 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
2045                   int idx, int expedite)
2046 {
2047         struct lpfc_io_buf *lpfc_ncmd;
2048         struct lpfc_sli4_hdw_queue *qp;
2049         struct sli4_sge *sgl;
2050         struct lpfc_iocbq *pwqeq;
2051         union lpfc_wqe128 *wqe;
2052
2053         lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
2054
2055         if (lpfc_ncmd) {
2056                 pwqeq = &(lpfc_ncmd->cur_iocbq);
2057                 wqe = &pwqeq->wqe;
2058
2059                 /* Setup key fields in buffer that may have been changed
2060                  * if other protocols used this buffer.
2061                  */
2062                 pwqeq->cmd_flag = LPFC_IO_NVME;
2063                 pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
2064                 lpfc_ncmd->start_time = jiffies;
2065                 lpfc_ncmd->flags = 0;
2066
                /* The rsp SGE will be filled in when we receive an IO
                 * from the NVME layer to be sent.
                 * The cmd is going to be embedded, so we need a SKIP SGE.
                 */
2071                 sgl = lpfc_ncmd->dma_sgl;
2072                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2073                 bf_set(lpfc_sli4_sge_last, sgl, 0);
2074                 sgl->word2 = cpu_to_le32(sgl->word2);
2075                 /* Fill in word 3 / sgl_len during cmd submission */
2076
2077                 /* Initialize 64 bytes only */
2078                 memset(wqe, 0, sizeof(union lpfc_wqe));
2079
2080                 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
2081                         atomic_inc(&ndlp->cmd_pending);
2082                         lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
2083                 }
2084
2085         } else {
2086                 qp = &phba->sli4_hba.hdwq[idx];
2087                 qp->empty_io_bufs++;
2088         }
2089
2090         return  lpfc_ncmd;
2091 }
2092
2093 /**
 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
 * of the @phba lpfc_io_buf_list. For SLI4, XRIs are tied to the nvme buffer
 * and the buffer cannot be reused for at least RA_TOV if the IO was
 * aborted.
2102  **/
2103 static void
2104 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
2105 {
2106         struct lpfc_sli4_hdw_queue *qp;
2107         unsigned long iflag = 0;
2108
2109         if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2110                 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
2111
2112         lpfc_ncmd->ndlp = NULL;
2113         lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
2114
2115         qp = lpfc_ncmd->hdwq;
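        /* An exchange still marked busy (XB) is owned by the port until
         * the aborted XRI completes, so park the buffer on the aborted
         * list rather than freeing it for reuse.
         */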
2116         if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
2117                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2118                                 "6310 XB release deferred for "
2119                                 "ox_id x%x on reqtag x%x\n",
2120                                 lpfc_ncmd->cur_iocbq.sli4_xritag,
2121                                 lpfc_ncmd->cur_iocbq.iotag);
2122
2123                 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
2124                 list_add_tail(&lpfc_ncmd->list,
2125                         &qp->lpfc_abts_io_buf_list);
2126                 qp->abts_nvme_io_bufs++;
2127                 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2128         } else
2129                 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2130 }
2131
2132 /**
2133  * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2134  * @vport: the lpfc_vport instance requesting a localport.
2135  *
2136  * This routine is invoked to create an nvme localport instance to bind
2137  * to the nvme_fc_transport.  It is called once during driver load
2138  * like lpfc_create_shost after all other services are initialized.
2139  * It requires a vport, vpi, and wwns at call time.  Other localport
2140  * parameters are modified as the driver's FCID and the Fabric WWN
2141  * are established.
2142  *
2143  * Return codes
2144  *      0 - successful
2145  *      -ENOMEM - no heap memory available
2146  *      other values - from nvme registration upcall
2147  **/
2148 int
2149 lpfc_nvme_create_localport(struct lpfc_vport *vport)
2150 {
2151         int ret = 0;
2152         struct lpfc_hba  *phba = vport->phba;
2153         struct nvme_fc_port_info nfcp_info;
2154         struct nvme_fc_local_port *localport;
2155         struct lpfc_nvme_lport *lport;
2156
2157         /* Initialize this localport instance.  The vport wwn usage ensures
2158          * that NPIV is accounted for.
2159          */
2160         memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2161         nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2162         nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2163         nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2164
        /* We need to tell the transport layer cfg_nvme_seg_cnt + 1 because
         * it takes page alignment into account. When space for the SGL is
         * allocated we allocate + 3: one SGE for the cmd, one for the rsp,
         * and one for this alignment.
         */
2169         lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2170
2171         /* Advertise how many hw queues we support based on cfg_hdw_queue,
2172          * which will not exceed cpu count.
2173          */
2174         lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2175
2176         if (!IS_ENABLED(CONFIG_NVME_FC))
2177                 return ret;
2178
        /* The localport pointer lives on the stack, but the registration
         * call allocates the localport structure and its private area
         * from the heap.
         */
2182
2183         ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2184                                          &vport->phba->pcidev->dev, &localport);
2185         if (!ret) {
2186                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2187                                  "6005 Successfully registered local "
2188                                  "NVME port num %d, localP x%px, private "
2189                                  "x%px, sg_seg %d\n",
2190                                  localport->port_num, localport,
2191                                  localport->private,
2192                                  lpfc_nvme_template.max_sgl_segments);
2193
2194                 /* Private is our lport size declared in the template. */
2195                 lport = (struct lpfc_nvme_lport *)localport->private;
2196                 vport->localport = localport;
2197                 lport->vport = vport;
2198                 vport->nvmei_support = 1;
2199
2200                 atomic_set(&lport->xmt_fcp_noxri, 0);
2201                 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2202                 atomic_set(&lport->xmt_fcp_qdepth, 0);
2203                 atomic_set(&lport->xmt_fcp_err, 0);
2204                 atomic_set(&lport->xmt_fcp_wqerr, 0);
2205                 atomic_set(&lport->xmt_fcp_abort, 0);
2206                 atomic_set(&lport->xmt_ls_abort, 0);
2207                 atomic_set(&lport->xmt_ls_err, 0);
2208                 atomic_set(&lport->cmpl_fcp_xb, 0);
2209                 atomic_set(&lport->cmpl_fcp_err, 0);
2210                 atomic_set(&lport->cmpl_ls_xb, 0);
2211                 atomic_set(&lport->cmpl_ls_err, 0);
2212
2213                 atomic_set(&lport->fc4NvmeLsRequests, 0);
2214                 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2215         }
2216
2217         return ret;
2218 }
2219
2220 #if (IS_ENABLED(CONFIG_NVME_FC))
2221 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2222  *
2223  * The driver has to wait for the host nvme transport to callback
2224  * indicating the localport has successfully unregistered all
2225  * resources.  Since this is an uninterruptible wait, loop every ten
2226  * seconds and print a message indicating no progress.
2227  *
2228  * An uninterruptible wait is used because of the risk of transport-to-
2229  * driver state mismatch.
2230  */
2231 static void
2232 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2233                            struct lpfc_nvme_lport *lport,
2234                            struct completion *lport_unreg_cmp)
2235 {
2236         u32 wait_tmo;
2237         int ret, i, pending = 0;
2238         struct lpfc_sli_ring  *pring;
2239         struct lpfc_hba  *phba = vport->phba;
2240         struct lpfc_sli4_hdw_queue *qp;
2241         int abts_scsi, abts_nvme;
2242
        /* The host transport has to clean up and confirm, which requires
         * an indefinite wait. Print a message if a 10 second wait expires
         * and renew the wait. This is unexpected.
         */
2247         wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2248         while (true) {
2249                 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2250                 if (unlikely(!ret)) {
2251                         pending = 0;
2252                         abts_scsi = 0;
2253                         abts_nvme = 0;
2254                         for (i = 0; i < phba->cfg_hdw_queue; i++) {
2255                                 qp = &phba->sli4_hba.hdwq[i];
2256                                 if (!vport->localport || !qp || !qp->io_wq)
2257                                         return;
2258
2259                                 pring = qp->io_wq->pring;
2260                                 if (!pring)
2261                                         continue;
2262                                 pending += pring->txcmplq_cnt;
2263                                 abts_scsi += qp->abts_scsi_io_bufs;
2264                                 abts_nvme += qp->abts_nvme_io_bufs;
2265                         }
2266                         if (!vport->localport ||
2267                             test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
2268                             vport->load_flag & FC_UNLOADING)
2269                                 return;
2270
2271                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2272                                          "6176 Lport x%px Localport x%px wait "
2273                                          "timed out. Pending %d [%d:%d]. "
2274                                          "Renewing.\n",
2275                                          lport, vport->localport, pending,
2276                                          abts_scsi, abts_nvme);
2277                         continue;
2278                 }
2279                 break;
2280         }
2281         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2282                          "6177 Lport x%px Localport x%px Complete Success\n",
2283                          lport, vport->localport);
2284 }
2285 #endif
2286
2287 /**
2288  * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2289  * @vport: pointer to a host virtual N_Port data structure
2290  *
2291  * This routine is invoked to destroy all lports bound to the phba.
2292  * The lport memory was allocated by the nvme fc transport and is
2293  * released there.  This routine ensures all rports bound to the
2294  * lport have been disconnected.
2295  *
2296  **/
2297 void
2298 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2299 {
2300 #if (IS_ENABLED(CONFIG_NVME_FC))
2301         struct nvme_fc_local_port *localport;
2302         struct lpfc_nvme_lport *lport;
2303         int ret;
2304         DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2305
2306         if (vport->nvmei_support == 0)
2307                 return;
2308
2309         localport = vport->localport;
2310         if (!localport)
2311                 return;
2312         lport = (struct lpfc_nvme_lport *)localport->private;
2313
2314         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2315                          "6011 Destroying NVME localport x%px\n",
2316                          localport);
2317
2318         /* lport's rport list is clear.  Unregister
2319          * lport and release resources.
2320          */
2321         lport->lport_unreg_cmp = &lport_unreg_cmp;
2322         ret = nvme_fc_unregister_localport(localport);
2323
2324         /* Wait for completion.  This either blocks
2325          * indefinitely or succeeds
2326          */
2327         lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2328         vport->localport = NULL;
2329
2330         /* Regardless of the unregister upcall response, clear
2331          * nvmei_support.  All rports are unregistered and the
2332          * driver will clean up.
2333          */
2334         vport->nvmei_support = 0;
2335         if (ret == 0) {
2336                 lpfc_printf_vlog(vport,
2337                                  KERN_INFO, LOG_NVME_DISC,
2338                                  "6009 Unregistered lport Success\n");
2339         } else {
2340                 lpfc_printf_vlog(vport,
2341                                  KERN_INFO, LOG_NVME_DISC,
2342                                  "6010 Unregistered lport "
2343                                  "Failed, status x%x\n",
2344                                  ret);
2345         }
2346 #endif
2347 }
2348
2349 void
2350 lpfc_nvme_update_localport(struct lpfc_vport *vport)
2351 {
2352 #if (IS_ENABLED(CONFIG_NVME_FC))
2353         struct nvme_fc_local_port *localport;
2354         struct lpfc_nvme_lport *lport;
2355
2356         localport = vport->localport;
2357         if (!localport) {
2358                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2359                                  "6710 Update NVME fail. No localport\n");
2360                 return;
2361         }
2362         lport = (struct lpfc_nvme_lport *)localport->private;
2363         if (!lport) {
2364                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2365                                  "6171 Update NVME fail. localP x%px, No lport\n",
2366                                  localport);
2367                 return;
2368         }
2369         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2370                          "6012 Update NVME lport x%px did x%x\n",
2371                          localport, vport->fc_myDID);
2372
2373         localport->port_id = vport->fc_myDID;
2374         if (localport->port_id == 0)
2375                 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2376         else
2377                 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2378
2379         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2380                          "6030 bound lport x%px to DID x%06x\n",
2381                          lport, localport->port_id);
2382 #endif
2383 }
2384
2385 int
2386 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2387 {
2388 #if (IS_ENABLED(CONFIG_NVME_FC))
2389         int ret = 0;
2390         struct nvme_fc_local_port *localport;
2391         struct lpfc_nvme_lport *lport;
2392         struct lpfc_nvme_rport *rport;
2393         struct lpfc_nvme_rport *oldrport;
2394         struct nvme_fc_remote_port *remote_port;
2395         struct nvme_fc_port_info rpinfo;
2396         struct lpfc_nodelist *prev_ndlp = NULL;
2397         struct fc_rport *srport = ndlp->rport;
2398
2399         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2400                          "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2401                          ndlp->nlp_DID, ndlp->nlp_type);
2402
2403         localport = vport->localport;
2404         if (!localport)
2405                 return 0;
2406
2407         lport = (struct lpfc_nvme_lport *)localport->private;
2408
        /* NVME rports are not preserved across devloss.
         * Just register this instance.  Note, rpinfo.dev_loss_tmo is
         * taken from the fc rport when one exists, else from the vport
         * default.  The driver communicates port role capabilities
         * consistent with the PRLI response data.
         */
2415         memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2416         rpinfo.port_id = ndlp->nlp_DID;
2417         if (ndlp->nlp_type & NLP_NVME_TARGET)
2418                 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2419         if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2420                 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2421
2422         if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2423                 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2424
2425         rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2426         rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2427         if (srport)
2428                 rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
2429         else
2430                 rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
2431
2432         spin_lock_irq(&ndlp->lock);
2433
        /* If an oldrport exists, so does the ndlp reference.  If not,
         * a new reference is needed because either the node has never
         * been registered or it has been unregistered and is being deleted.
         */
2438         oldrport = lpfc_ndlp_get_nrport(ndlp);
2439         if (oldrport) {
2440                 prev_ndlp = oldrport->ndlp;
2441                 spin_unlock_irq(&ndlp->lock);
2442         } else {
2443                 spin_unlock_irq(&ndlp->lock);
2444                 if (!lpfc_nlp_get(ndlp)) {
2445                         dev_warn(&vport->phba->pcidev->dev,
2446                                  "Warning - No node ref - exit register\n");
2447                         return 0;
2448                 }
2449         }
2450
2451         ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2452         if (!ret) {
2453                 /* If the ndlp already has an nrport, this is just
2454                  * a resume of the existing rport.  Else this is a
2455                  * new rport.
2456                  */
2457                 /* Guard against an unregister/reregister
2458                  * race that leaves the WAIT flag set.
2459                  */
2460                 spin_lock_irq(&ndlp->lock);
2461                 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
2462                 ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
2463                 spin_unlock_irq(&ndlp->lock);
2464                 rport = remote_port->private;
2465                 if (oldrport) {
2466
2467                         /* Sever the ndlp<->rport association
2468                          * before dropping the ndlp ref from
2469                          * register.
2470                          */
2471                         spin_lock_irq(&ndlp->lock);
2472                         ndlp->nrport = NULL;
2473                         ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
2474                         spin_unlock_irq(&ndlp->lock);
2475                         rport->ndlp = NULL;
2476                         rport->remoteport = NULL;
2477
                        /* The reference is only removed if the previous NDLP
                         * is no longer active. It might be just a swap, and
                         * removing the reference would cause a premature cleanup.
                         */
2482                         if (prev_ndlp && prev_ndlp != ndlp) {
2483                                 if (!prev_ndlp->nrport)
2484                                         lpfc_nlp_put(prev_ndlp);
2485                         }
2486                 }
2487
                /* Cleanly bind the rport to the ndlp. */
2489                 rport->remoteport = remote_port;
2490                 rport->lport = lport;
2491                 rport->ndlp = ndlp;
2492                 spin_lock_irq(&ndlp->lock);
2493                 ndlp->nrport = rport;
2494                 spin_unlock_irq(&ndlp->lock);
2495                 lpfc_printf_vlog(vport, KERN_INFO,
2496                                  LOG_NVME_DISC | LOG_NODE,
2497                                  "6022 Bind lport x%px to remoteport x%px "
2498                                  "rport x%px WWNN 0x%llx, "
2499                                  "Rport WWPN 0x%llx DID "
                                 "x%06x Role x%x, ndlp x%px prev_ndlp x%px\n",
2501                                  lport, remote_port, rport,
2502                                  rpinfo.node_name, rpinfo.port_name,
2503                                  rpinfo.port_id, rpinfo.port_role,
2504                                  ndlp, prev_ndlp);
2505         } else {
2506                 lpfc_printf_vlog(vport, KERN_ERR,
2507                                  LOG_TRACE_EVENT,
2508                                  "6031 RemotePort Registration failed "
2509                                  "err: %d, DID x%06x\n",
2510                                  ret, ndlp->nlp_DID);
2511         }
2512
2513         return ret;
2514 #else
2515         return 0;
2516 #endif
2517 }
2518
2519 /*
2520  * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
2521  *
 * If the ndlp represents an NVME Target that we are logged into,
2523  * ping the NVME FC Transport layer to initiate a device rescan
2524  * on this remote NPort.
2525  */
2526 void
2527 lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2528 {
2529 #if (IS_ENABLED(CONFIG_NVME_FC))
2530         struct lpfc_nvme_rport *nrport;
2531         struct nvme_fc_remote_port *remoteport = NULL;
2532
2533         spin_lock_irq(&ndlp->lock);
2534         nrport = lpfc_ndlp_get_nrport(ndlp);
2535         if (nrport)
2536                 remoteport = nrport->remoteport;
2537         spin_unlock_irq(&ndlp->lock);
2538
2539         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2540                          "6170 Rescan NPort DID x%06x type x%x "
2541                          "state x%x nrport x%px remoteport x%px\n",
2542                          ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2543                          nrport, remoteport);
2544
2545         if (!nrport || !remoteport)
2546                 goto rescan_exit;
2547
2548         /* Rescan an NVME target in MAPPED state with DISCOVERY role set */
2549         if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2550             ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2551                 nvme_fc_rescan_remoteport(remoteport);
2552
2553                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2554                                  "6172 NVME rescanned DID x%06x "
2555                                  "port_state x%x\n",
2556                                  ndlp->nlp_DID, remoteport->port_state);
2557         }
2558         return;
2559  rescan_exit:
2560         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2561                          "6169 Skip NVME Rport Rescan, NVME remoteport "
2562                          "unregistered\n");
2563 #endif
2564 }
2565
2566 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2567  *
2568  * There is no notion of Devloss or rport recovery from the current
2569  * nvme_transport perspective.  Loss of an rport just means IO cannot
2570  * be sent and recovery is completely up to the initiator.
2571  * For now, the driver just unbinds the DID and port_role so that
2572  * no further IO can be issued.  Changes are planned for later.
2573  *
2574  * Notes - the ndlp reference count is not decremented here
2575  * since there is no nvme_transport api for devloss.  Node ref count
2576  * is only adjusted in driver unload.
2577  */
2578 void
2579 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2580 {
2581 #if (IS_ENABLED(CONFIG_NVME_FC))
2582         int ret;
2583         struct nvme_fc_local_port *localport;
2584         struct lpfc_nvme_lport *lport;
2585         struct lpfc_nvme_rport *rport;
2586         struct nvme_fc_remote_port *remoteport = NULL;
2587
2588         localport = vport->localport;
2589
2590         /* This is a fundamental error.  The localport is always
2591          * available until driver unload.  Just exit.
2592          */
2593         if (!localport)
2594                 return;
2595
2596         lport = (struct lpfc_nvme_lport *)localport->private;
2597         if (!lport)
2598                 goto input_err;
2599
2600         spin_lock_irq(&ndlp->lock);
2601         rport = lpfc_ndlp_get_nrport(ndlp);
2602         if (rport)
2603                 remoteport = rport->remoteport;
2604         spin_unlock_irq(&ndlp->lock);
2605         if (!remoteport)
2606                 goto input_err;
2607
2608         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2609                          "6033 Unreg nvme remoteport x%px, portname x%llx, "
2610                          "port_id x%06x, portstate x%x port type x%x "
2611                          "refcnt %d\n",
2612                          remoteport, remoteport->port_name,
2613                          remoteport->port_id, remoteport->port_state,
2614                          ndlp->nlp_type, kref_read(&ndlp->kref));
2615
2616         /* Sanity check ndlp type.  Only call for NVME ports. Don't
2617          * clear any rport state until the transport calls back.
2618          */
2619
2620         if (ndlp->nlp_type & NLP_NVME_TARGET) {
2621                 /* No concern about the role change on the nvme remoteport.
2622                  * The transport will update it.
2623                  */
2624                 spin_lock_irq(&vport->phba->hbalock);
2625                 ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
2626                 spin_unlock_irq(&vport->phba->hbalock);
2627
2628                 /* Don't let the host nvme transport keep sending keep-alives
2629                  * on this remoteport. Vport is unloading, no recovery. The
2630                  * return value is ignored.  The upcall is a courtesy to the
2631                  * transport.
2632                  */
2633                 if (vport->load_flag & FC_UNLOADING)
2634                         (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2635
2636                 ret = nvme_fc_unregister_remoteport(remoteport);
2637
2638                 /* The driver no longer knows if the nrport memory is valid
2639                  * because the controller teardown process has begun and
2640                  * is asynchronous.  Break the binding in the ndlp. Also
2641                  * remove the register ndlp reference to set up node release.
2642                  */
2643                 ndlp->nrport = NULL;
2644                 lpfc_nlp_put(ndlp);
2645                 if (ret != 0) {
2646                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2647                                          "6167 NVME unregister failed %d "
2648                                          "port_state x%x\n",
2649                                          ret, remoteport->port_state);
2650                 }
2651         }
2652         return;
2653
2654  input_err:
2655 #endif
2656         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2657                          "6168 State error: lport x%px, rport x%px FCID x%06x\n",
2658                          vport->localport, ndlp->rport, ndlp->nlp_DID);
2659 }
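/*
 * Simplified ordering of the unregister handshake above (a sketch, not
 * a literal trace):
 *
 *	1. set NVME_XPT_UNREG_WAIT in ndlp->fc4_xpt_flags under hbalock
 *	2. if unloading, zero the devloss window so no keep-alives go out
 *	3. nvme_fc_unregister_remoteport() starts asynchronous teardown
 *	4. the transport's delete callback eventually releases rport
 *	   state; until then the driver must not touch the nrport memory
 */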
2660
2661 /**
2662  * lpfc_sli4_nvme_pci_offline_aborted - Process an NVME xri abort on PCI offline
2663  * @phba: pointer to lpfc hba data structure.
2664  * @lpfc_ncmd: The nvme job structure for the request being aborted.
2665  *
2666  * This routine is invoked by the worker thread to process a SLI4 fast-path
2667  * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
2668  * here.
2669  **/
2670 void
2671 lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
2672                                    struct lpfc_io_buf *lpfc_ncmd)
2673 {
2674         struct nvmefc_fcp_req *nvme_cmd = NULL;
2675
2676         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2677                         "6533 %s nvme_cmd x%px tag x%x abort complete and "
2678                         "xri released\n", __func__,
2679                         lpfc_ncmd->nvmeCmd,
2680                         lpfc_ncmd->cur_iocbq.iotag);
2681
2682         /* Aborted NVME commands are required to not complete
2683          * before the abort exchange command fully completes.
2684          * Once the abort completes, the io_buf is available via the put list.
2685          */
2686         if (lpfc_ncmd->nvmeCmd) {
2687                 nvme_cmd = lpfc_ncmd->nvmeCmd;
2688                 nvme_cmd->transferred_length = 0;
2689                 nvme_cmd->rcv_rsplen = 0;
2690                 nvme_cmd->status = NVME_SC_INTERNAL;
2691                 nvme_cmd->done(nvme_cmd);
2692                 lpfc_ncmd->nvmeCmd = NULL;
2693         }
2694         lpfc_release_nvme_buf(phba, lpfc_ncmd);
2695 }
2696
2697 /**
2698  * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2699  * @phba: pointer to lpfc hba data structure.
2700  * @axri: pointer to the fcp xri abort wcqe structure.
2701  * @lpfc_ncmd: The nvme job structure for the request being aborted.
2702  *
2703  * This routine is invoked by the worker thread to process a SLI4 fast-path
2704  * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
2705  * here.
2706  **/
2707 void
2708 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2709                            struct sli4_wcqe_xri_aborted *axri,
2710                            struct lpfc_io_buf *lpfc_ncmd)
2711 {
2712         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2713         struct nvmefc_fcp_req *nvme_cmd = NULL;
2714         struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
2715
2716
2717         if (ndlp)
2718                 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2719
2720         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2721                         "6311 nvme_cmd x%px xri x%x tag x%x abort complete and "
2722                         "xri released\n",
2723                         lpfc_ncmd->nvmeCmd, xri,
2724                         lpfc_ncmd->cur_iocbq.iotag);
2725
2726         /* Aborted NVME commands are required to not complete
2727          * before the abort exchange command fully completes.
2728          * Once the abort completes, the io_buf is available via the put list.
2729          */
2730         if (lpfc_ncmd->nvmeCmd) {
2731                 nvme_cmd = lpfc_ncmd->nvmeCmd;
2732                 nvme_cmd->done(nvme_cmd);
2733                 lpfc_ncmd->nvmeCmd = NULL;
2734         }
2735         lpfc_release_nvme_buf(phba, lpfc_ncmd);
2736 }
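/*
 * Contrast with lpfc_sli4_nvme_pci_offline_aborted() above: the PCI
 * offline path has no WCQE to report status from, so it fabricates an
 * NVME_SC_INTERNAL completion with zeroed lengths before calling done();
 * here the abort completed through the hardware, so done() is invoked
 * without the driver overriding the request's status fields.
 */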
2737
2738 /**
2739  * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2740  * @phba: Pointer to HBA context object.
2741  *
2742  * This function flushes all wqes in the nvme rings and frees all resources
2743  * in the txcmplq. This function does not issue abort wqes for the IO
2744  * commands in the txcmplq; they will just be returned with
2745  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
2746  * slot has been permanently disabled.
2747  **/
2748 void
2749 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2750 {
2751         struct lpfc_sli_ring  *pring;
2752         u32 i, wait_cnt = 0;
2753
2754         if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2755                 return;
2756
2757         /* Cycle through all IO rings and make sure all outstanding
2758          * WQEs have been removed from the txcmplqs.
2759          */
2760         for (i = 0; i < phba->cfg_hdw_queue; i++) {
2761                 if (!phba->sli4_hba.hdwq[i].io_wq)
2762                         continue;
2763                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2764
2765                 if (!pring)
2766                         continue;
2767
2768                 /* Retrieve everything on the txcmplq */
2769                 while (!list_empty(&pring->txcmplq)) {
2770                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2771                         wait_cnt++;
2772
2773                         /* The sleep is 10 ms.  Every ten seconds
2774                          * (1000 iterations), log a message; something is wrong.
2775                          */
2776                         if ((wait_cnt % 1000) == 0) {
2777                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2778                                                 "6178 NVME IO not empty, "
2779                                                 "cnt %d\n", wait_cnt);
2780                         }
2781                 }
2782         }
2783
2784         /* Make sure HBA is alive */
2785         lpfc_issue_hb_tmo(phba);
2786
2787 }
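/*
 * Hypothetical EEH-path usage (a sketch under assumptions; the actual
 * call sites are in the PCI error handlers elsewhere in the driver):
 *
 *	lpfc_sli_flush_io_rings(phba);       /. fail ring IO with IOERR_SLI_DOWN ./
 *	lpfc_nvme_wait_for_io_drain(phba);   /. block until txcmplqs are empty ./
 */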
2788
2789 void
2790 lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
2791                       uint32_t stat, uint32_t param)
2792 {
2793 #if (IS_ENABLED(CONFIG_NVME_FC))
2794         struct lpfc_io_buf *lpfc_ncmd;
2795         struct nvmefc_fcp_req *nCmd;
2796         struct lpfc_wcqe_complete wcqe;
2797         struct lpfc_wcqe_complete *wcqep = &wcqe;
2798
2799         lpfc_ncmd = pwqeIn->io_buf;
2800         if (!lpfc_ncmd) {
2801                 lpfc_sli_release_iocbq(phba, pwqeIn);
2802                 return;
2803         }
2804         /* For an abort iocb just return; the IO iocb will do a done call */
2805         if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2806             CMD_ABORT_XRI_CX) {
2807                 lpfc_sli_release_iocbq(phba, pwqeIn);
2808                 return;
2809         }
2810
2811         spin_lock(&lpfc_ncmd->buf_lock);
2812         nCmd = lpfc_ncmd->nvmeCmd;
2813         if (!nCmd) {
2814                 spin_unlock(&lpfc_ncmd->buf_lock);
2815                 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2816                 return;
2817         }
2818         spin_unlock(&lpfc_ncmd->buf_lock);
2819
2820         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2821                         "6194 NVME Cancel xri %x\n",
2822                         lpfc_ncmd->cur_iocbq.sli4_xritag);
2823
2824         wcqep->word0 = 0;
2825         bf_set(lpfc_wcqe_c_status, wcqep, stat);
2826         wcqep->parameter = param;
2827         wcqep->word3 = 0; /* xb is 0 */
2828
2829         /* Call release with XB=1 to queue the IO into the abort list. */
2830         if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
2831                 bf_set(lpfc_wcqe_c_xb, wcqep, 1);
2832
2833         memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
2834         (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
2835 #endif
2836 }
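/*
 * Annotation of the fabricated WCQE above (field mapping taken from the
 * code itself; example values are assumptions):
 *
 *	word0     <- caller-supplied stat (completion status)
 *	parameter <- caller-supplied param (e.g. IOERR_SLI_DOWN)
 *	word3 XB  <- set only while SLI is active, so the release path
 *	             queues the XRI on the abort list instead of freeing it
 *
 * cmd_cmpl then completes the nvme request exactly as if this WCQE had
 * arrived from the hardware.
 */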