GNU Linux-libre 4.14.251-gnu1
drivers/scsi/lpfc/lpfc_nvmet.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
5  * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38
39 #include <../drivers/nvme/host/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41
42 #include "lpfc_version.h"
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_scsi.h"
51 #include "lpfc_nvme.h"
52 #include "lpfc_nvmet.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_crtn.h"
55 #include "lpfc_vport.h"
56 #include "lpfc_debugfs.h"
57
58 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
59                                                  struct lpfc_nvmet_rcv_ctx *,
60                                                  dma_addr_t rspbuf,
61                                                  uint16_t rspsize);
62 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
63                                                   struct lpfc_nvmet_rcv_ctx *);
64 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
65                                           struct lpfc_nvmet_rcv_ctx *,
66                                           uint32_t, uint16_t);
67 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
68                                             struct lpfc_nvmet_rcv_ctx *,
69                                             uint32_t, uint16_t);
70 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
71                                            struct lpfc_nvmet_rcv_ctx *,
72                                            uint32_t, uint16_t);
73
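/**
 * lpfc_nvmet_defer_release - Queue a receive context for deferred release
 * @phba: Pointer to HBA context object.
 * @ctxp: Pointer to the receive context to be released later.
 *
 * Marks the context with LPFC_NVMET_CTX_RLS and places it on the
 * lpfc_abts_nvmet_ctx_list so the abort/XRI-aborted path can free it once
 * outstanding work completes.  If the flag is already set, the context has
 * already been queued and the routine returns without requeuing it.
 **/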
74 void
75 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
76 {
77         unsigned long iflag;
78
79         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
80                         "6313 NVMET Defer ctx release xri x%x flg x%x\n",
81                         ctxp->oxid, ctxp->flag);
82
83         spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
84         if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
85                 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
86                                        iflag);
87                 return;
88         }
89         ctxp->flag |= LPFC_NVMET_CTX_RLS;
90         list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
91         spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
92 }
93
94 /**
95  * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
96  * @phba: Pointer to HBA context object.
97  * @cmdwqe: Pointer to driver command WQE object.
98  * @wcqe: Pointer to driver response CQE object.
99  *
100  * The function is called from the SLI ring event handler with no
101  * lock held. This function is the completion handler for NVME LS commands.
102  * The function frees memory resources used for the NVME commands.
103  **/
104 static void
105 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
106                           struct lpfc_wcqe_complete *wcqe)
107 {
108         struct lpfc_nvmet_tgtport *tgtp;
109         struct nvmefc_tgt_ls_req *rsp;
110         struct lpfc_nvmet_rcv_ctx *ctxp;
111         uint32_t status, result;
112
113         status = bf_get(lpfc_wcqe_c_status, wcqe);
114         result = wcqe->parameter;
115         ctxp = cmdwqe->context2;
116
117         if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
118                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
119                                 "6410 NVMET LS cmpl state mismatch IO x%x: "
120                                 "%d %d\n",
121                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
122         }
123
124         if (!phba->targetport)
125                 goto out;
126
127         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
128
129         if (status)
130                 atomic_inc(&tgtp->xmt_ls_rsp_error);
131         else
132                 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
133
134 out:
135         rsp = &ctxp->ctx.ls_req;
136
137         lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
138                          ctxp->oxid, status, result);
139
140         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
141                         "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
142                         status, result, ctxp->oxid);
143
144         lpfc_nlp_put(cmdwqe->context1);
145         cmdwqe->context2 = NULL;
146         cmdwqe->context3 = NULL;
147         lpfc_sli_release_iocbq(phba, cmdwqe);
148         rsp->done(rsp);
149         kfree(ctxp);
150 }
151
152 /**
153  * lpfc_nvmet_ctxbuf_post - Release an NVMET receive context for reuse
154  * @phba: HBA the context buffer is associated with
155  * @ctx_buf: Context buffer to release
156  *
157  * Description: Returns a receive context to the per-CPU/MRQ free list or,
158  * if an unsolicited FCP command is waiting on the nvmet_io_wait_list,
159  * reuses the context immediately to service that deferred command.
160  *
161  * Notes: Takes the nvmet_io_wait_lock and the per-CPU context list lock.
162  *
163  * Returns: None
164  **/
165 void
166 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
167 {
168 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
169         struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
170         struct lpfc_nvmet_tgtport *tgtp;
171         struct fc_frame_header *fc_hdr;
172         struct rqb_dmabuf *nvmebuf;
173         struct lpfc_nvmet_ctx_info *infop;
174         uint32_t *payload;
175         uint32_t size, oxid, sid, rc;
176         int cpu;
177         unsigned long iflag;
178
179         if (ctxp->txrdy) {
180                 dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
181                               ctxp->txrdy_phys);
182                 ctxp->txrdy = NULL;
183                 ctxp->txrdy_phys = 0;
184         }
185
186         if (ctxp->state == LPFC_NVMET_STE_FREE) {
187                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
188                                 "6411 NVMET free, already free IO x%x: %d %d\n",
189                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
190         }
191         ctxp->state = LPFC_NVMET_STE_FREE;
192
193         spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
194         if (phba->sli4_hba.nvmet_io_wait_cnt) {
195                 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
196                                  nvmebuf, struct rqb_dmabuf,
197                                  hbuf.list);
198                 phba->sli4_hba.nvmet_io_wait_cnt--;
199                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
200                                        iflag);
201
202                 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
203                 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
204                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
205                 payload = (uint32_t *)(nvmebuf->dbuf.virt);
206                 size = nvmebuf->bytes_recv;
207                 sid = sli4_sid_from_fc_hdr(fc_hdr);
208
209                 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
210                 ctxp->wqeq = NULL;
211                 ctxp->txrdy = NULL;
212                 ctxp->offset = 0;
213                 ctxp->phba = phba;
214                 ctxp->size = size;
215                 ctxp->oxid = oxid;
216                 ctxp->sid = sid;
217                 ctxp->state = LPFC_NVMET_STE_RCV;
218                 ctxp->entry_cnt = 1;
219                 ctxp->flag = 0;
220                 ctxp->ctxbuf = ctx_buf;
221                 spin_lock_init(&ctxp->ctxlock);
222
223 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
224                 if (phba->ktime_on) {
225                         ctxp->ts_cmd_nvme = ktime_get_ns();
226                         ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
227                         ctxp->ts_nvme_data = 0;
228                         ctxp->ts_data_wqput = 0;
229                         ctxp->ts_isr_data = 0;
230                         ctxp->ts_data_nvme = 0;
231                         ctxp->ts_nvme_status = 0;
232                         ctxp->ts_status_wqput = 0;
233                         ctxp->ts_isr_status = 0;
234                         ctxp->ts_status_nvme = 0;
235                 }
236 #endif
237                 atomic_inc(&tgtp->rcv_fcp_cmd_in);
238                 /*
239                  * The calling sequence should be:
240                  * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
241                  * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
242                  * When we return from nvmet_fc_rcv_fcp_req, all relevant info
243                  * about the NVME command / FC header has already been saved.
244                  * A buffer has already been reposted for this IO, so just free
245                  * the nvmebuf.
246                  */
247                 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
248                                           payload, size);
249
250                 /* Process FCP command */
251                 if (rc == 0) {
252                         atomic_inc(&tgtp->rcv_fcp_cmd_out);
253                         nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
254                         return;
255                 }
256
257                 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
258                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
259                                 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
260                                 ctxp->oxid, rc,
261                                 atomic_read(&tgtp->rcv_fcp_cmd_in),
262                                 atomic_read(&tgtp->rcv_fcp_cmd_out),
263                                 atomic_read(&tgtp->xmt_fcp_release));
264
265                 lpfc_nvmet_defer_release(phba, ctxp);
266                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
267                 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
268                 return;
269         }
270         spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
271
272         /*
273          * Use the CPU context list, from the MRQ the IO was received on
274          * (ctxp->idx), to save context structure.
275          */
276         cpu = smp_processor_id();
277         infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
278         spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
279         list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
280         infop->nvmet_ctx_list_cnt++;
281         spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
282 #endif
283 }
284
285 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
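/**
 * lpfc_nvmet_ktime - Accumulate per-IO latency statistics
 * @phba: Pointer to HBA context object.
 * @ctxp: Pointer to the receive context whose timestamps are processed.
 *
 * Converts the ktime stamps captured at each stage of an NVMET IO into the
 * ten latency segments described below and folds them into the per-HBA
 * totals, minimums and maximums reported through debugfs.  Returns early
 * if ktime capture is off or any timestamp is missing or out of order.
 **/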
286 static void
287 lpfc_nvmet_ktime(struct lpfc_hba *phba,
288                  struct lpfc_nvmet_rcv_ctx *ctxp)
289 {
290         uint64_t seg1, seg2, seg3, seg4, seg5;
291         uint64_t seg6, seg7, seg8, seg9, seg10;
292
293         if (!phba->ktime_on)
294                 return;
295
296         if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
297             !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
298             !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
299             !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
300             !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
301                 return;
302
303         if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
304                 return;
305         if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
306                 return;
307         if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
308                 return;
309         if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
310                 return;
311         if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
312                 return;
313         if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
314                 return;
315         if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
316                 return;
317         if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
318                 return;
319         if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
320                 return;
321         /*
322          * Segment 1 - Time from FCP command received by MSI-X ISR
323          * to FCP command is passed to NVME Layer.
324          * Segment 2 - Time from FCP command payload handed
325          * off to NVME Layer to Driver receives a Command op
326          * from NVME Layer.
327          * Segment 3 - Time from Driver receives a Command op
328          * from NVME Layer to Command is put on WQ.
329          * Segment 4 - Time from Driver WQ put is done
330          * to MSI-X ISR for Command cmpl.
331          * Segment 5 - Time from MSI-X ISR for Command cmpl to
332          * Command cmpl is passed to NVME Layer.
333          * Segment 6 - Time from Command cmpl is passed to NVME
334          * Layer to Driver receives a RSP op from NVME Layer.
335          * Segment 7 - Time from Driver receives a RSP op from
336          * NVME Layer to WQ put is done on TRSP FCP Status.
337          * Segment 8 - Time from Driver WQ put is done on TRSP
338          * FCP Status to MSI-X ISR for TRSP cmpl.
339          * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
340          * TRSP cmpl is passed to NVME Layer.
341          * Segment 10 - Time from FCP command received by
342          * MSI-X ISR to command is completed on wire.
343          * (Segments 1 thru 8) for READDATA / WRITEDATA
344          * (Segments 1 thru 4) for READDATA_RSP
345          */
346         seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
347         seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
348         seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
349                 seg1 - seg2;
350         seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
351                 seg1 - seg2 - seg3;
352         seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
353                 seg1 - seg2 - seg3 - seg4;
354
355         /* For auto rsp commands seg6 thru seg10 will be 0 */
356         if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
357                 seg6 = (ctxp->ts_nvme_status -
358                         ctxp->ts_isr_cmd) -
359                         seg1 - seg2 - seg3 - seg4 - seg5;
360                 seg7 = (ctxp->ts_status_wqput -
361                         ctxp->ts_isr_cmd) -
362                         seg1 - seg2 - seg3 -
363                         seg4 - seg5 - seg6;
364                 seg8 = (ctxp->ts_isr_status -
365                         ctxp->ts_isr_cmd) -
366                         seg1 - seg2 - seg3 - seg4 -
367                         seg5 - seg6 - seg7;
368                 seg9 = (ctxp->ts_status_nvme -
369                         ctxp->ts_isr_cmd) -
370                         seg1 - seg2 - seg3 - seg4 -
371                         seg5 - seg6 - seg7 - seg8;
372                 seg10 = (ctxp->ts_isr_status -
373                         ctxp->ts_isr_cmd);
374         } else {
375                 seg6 =  0;
376                 seg7 =  0;
377                 seg8 =  0;
378                 seg9 =  0;
379                 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
380         }
381
382         phba->ktime_seg1_total += seg1;
383         if (seg1 < phba->ktime_seg1_min)
384                 phba->ktime_seg1_min = seg1;
385         else if (seg1 > phba->ktime_seg1_max)
386                 phba->ktime_seg1_max = seg1;
387
388         phba->ktime_seg2_total += seg2;
389         if (seg2 < phba->ktime_seg2_min)
390                 phba->ktime_seg2_min = seg2;
391         else if (seg2 > phba->ktime_seg2_max)
392                 phba->ktime_seg2_max = seg2;
393
394         phba->ktime_seg3_total += seg3;
395         if (seg3 < phba->ktime_seg3_min)
396                 phba->ktime_seg3_min = seg3;
397         else if (seg3 > phba->ktime_seg3_max)
398                 phba->ktime_seg3_max = seg3;
399
400         phba->ktime_seg4_total += seg4;
401         if (seg4 < phba->ktime_seg4_min)
402                 phba->ktime_seg4_min = seg4;
403         else if (seg4 > phba->ktime_seg4_max)
404                 phba->ktime_seg4_max = seg4;
405
406         phba->ktime_seg5_total += seg5;
407         if (seg5 < phba->ktime_seg5_min)
408                 phba->ktime_seg5_min = seg5;
409         else if (seg5 > phba->ktime_seg5_max)
410                 phba->ktime_seg5_max = seg5;
411
412         phba->ktime_data_samples++;
413         if (!seg6)
414                 goto out;
415
416         phba->ktime_seg6_total += seg6;
417         if (seg6 < phba->ktime_seg6_min)
418                 phba->ktime_seg6_min = seg6;
419         else if (seg6 > phba->ktime_seg6_max)
420                 phba->ktime_seg6_max = seg6;
421
422         phba->ktime_seg7_total += seg7;
423         if (seg7 < phba->ktime_seg7_min)
424                 phba->ktime_seg7_min = seg7;
425         else if (seg7 > phba->ktime_seg7_max)
426                 phba->ktime_seg7_max = seg7;
427
428         phba->ktime_seg8_total += seg8;
429         if (seg8 < phba->ktime_seg8_min)
430                 phba->ktime_seg8_min = seg8;
431         else if (seg8 > phba->ktime_seg8_max)
432                 phba->ktime_seg8_max = seg8;
433
434         phba->ktime_seg9_total += seg9;
435         if (seg9 < phba->ktime_seg9_min)
436                 phba->ktime_seg9_min = seg9;
437         else if (seg9 > phba->ktime_seg9_max)
438                 phba->ktime_seg9_max = seg9;
439 out:
440         phba->ktime_seg10_total += seg10;
441         if (seg10 < phba->ktime_seg10_min)
442                 phba->ktime_seg10_min = seg10;
443         else if (seg10 > phba->ktime_seg10_max)
444                 phba->ktime_seg10_max = seg10;
445         phba->ktime_status_samples++;
446 }
447 #endif
448
449 /**
450  * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
451  * @phba: Pointer to HBA context object.
452  * @cmdwqe: Pointer to driver command WQE object.
453  * @wcqe: Pointer to driver response CQE object.
454  *
455  * The function is called from the SLI ring event handler with no
456  * lock held. This function is the completion handler for NVME FCP commands.
457  * The function frees memory resources used for the NVME commands.
458  **/
459 static void
460 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
461                           struct lpfc_wcqe_complete *wcqe)
462 {
463         struct lpfc_nvmet_tgtport *tgtp;
464         struct nvmefc_tgt_fcp_req *rsp;
465         struct lpfc_nvmet_rcv_ctx *ctxp;
466         uint32_t status, result, op, start_clean;
467 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
468         uint32_t id;
469 #endif
470
471         ctxp = cmdwqe->context2;
472         ctxp->flag &= ~LPFC_NVMET_IO_INP;
473
474         rsp = &ctxp->ctx.fcp_req;
475         op = rsp->op;
476
477         status = bf_get(lpfc_wcqe_c_status, wcqe);
478         result = wcqe->parameter;
479
480         if (phba->targetport)
481                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
482         else
483                 tgtp = NULL;
484
485         lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
486                          ctxp->oxid, op, status);
487
488         if (status) {
489                 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
490                 rsp->transferred_length = 0;
491                 if (tgtp)
492                         atomic_inc(&tgtp->xmt_fcp_rsp_error);
493
494                 /* pick up SLI4 exchange busy condition */
495                 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
496                         ctxp->flag |= LPFC_NVMET_XBUSY;
497
498                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
499                                         "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
500                                         ctxp->oxid, status, result);
501                 } else {
502                         ctxp->flag &= ~LPFC_NVMET_XBUSY;
503                 }
504
505         } else {
506                 rsp->fcp_error = NVME_SC_SUCCESS;
507                 if (op == NVMET_FCOP_RSP)
508                         rsp->transferred_length = rsp->rsplen;
509                 else
510                         rsp->transferred_length = rsp->transfer_length;
511                 if (tgtp)
512                         atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
513         }
514
515         if ((op == NVMET_FCOP_READDATA_RSP) ||
516             (op == NVMET_FCOP_RSP)) {
517                 /* Sanity check */
518                 ctxp->state = LPFC_NVMET_STE_DONE;
519                 ctxp->entry_cnt++;
520
521 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
522                 if (phba->ktime_on) {
523                         if (rsp->op == NVMET_FCOP_READDATA_RSP) {
524                                 ctxp->ts_isr_data =
525                                         cmdwqe->isr_timestamp;
526                                 ctxp->ts_data_nvme =
527                                         ktime_get_ns();
528                                 ctxp->ts_nvme_status =
529                                         ctxp->ts_data_nvme;
530                                 ctxp->ts_status_wqput =
531                                         ctxp->ts_data_nvme;
532                                 ctxp->ts_isr_status =
533                                         ctxp->ts_data_nvme;
534                                 ctxp->ts_status_nvme =
535                                         ctxp->ts_data_nvme;
536                         } else {
537                                 ctxp->ts_isr_status =
538                                         cmdwqe->isr_timestamp;
539                                 ctxp->ts_status_nvme =
540                                         ktime_get_ns();
541                         }
542                 }
543                 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
544                         id = smp_processor_id();
545                         if (ctxp->cpu != id)
546                                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
547                                                 "6703 CPU Check cmpl: "
548                                                 "cpu %d expect %d\n",
549                                                 id, ctxp->cpu);
550                         if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
551                                 phba->cpucheck_cmpl_io[id]++;
552                 }
553 #endif
554                 rsp->done(rsp);
555 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
556                 if (phba->ktime_on)
557                         lpfc_nvmet_ktime(phba, ctxp);
558 #endif
559                 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
560         } else {
561                 ctxp->entry_cnt++;
562                 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
563                 memset(((char *)cmdwqe) + start_clean, 0,
564                        (sizeof(struct lpfc_iocbq) - start_clean));
565 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
566                 if (phba->ktime_on) {
567                         ctxp->ts_isr_data = cmdwqe->isr_timestamp;
568                         ctxp->ts_data_nvme = ktime_get_ns();
569                 }
570                 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
571                         id = smp_processor_id();
572                         if (ctxp->cpu != id)
573                                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
574                                                 "6704 CPU Check cmdcmpl: "
575                                                 "cpu %d expect %d\n",
576                                                 id, ctxp->cpu);
577                         if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
578                                 phba->cpucheck_ccmpl_io[id]++;
579                 }
580 #endif
581                 rsp->done(rsp);
582         }
583 }
584
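/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit an NVME LS response (nvmet_fc callback)
 * @tgtport: Pointer to the NVME transport target port.
 * @rsp: Pointer to the transport LS response request.
 *
 * Builds an XMIT_SEQUENCE WQE for the LS response payload and issues it on
 * the ELS WQ.  On success the receive buffer is freed for reposting and
 * lpfc_nvmet_xmt_ls_rsp_cmp handles completion; on failure the exchange is
 * aborted and a negative error code is returned to the transport.
 **/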
585 static int
586 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
587                       struct nvmefc_tgt_ls_req *rsp)
588 {
589         struct lpfc_nvmet_rcv_ctx *ctxp =
590                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
591         struct lpfc_hba *phba = ctxp->phba;
592         struct hbq_dmabuf *nvmebuf =
593                 (struct hbq_dmabuf *)ctxp->rqb_buffer;
594         struct lpfc_iocbq *nvmewqeq;
595         struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
596         struct lpfc_dmabuf dmabuf;
597         struct ulp_bde64 bpl;
598         int rc;
599
600         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
601                         "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
602
603         if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
604             (ctxp->entry_cnt != 1)) {
605                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
606                                 "6412 NVMET LS rsp state mismatch "
607                                 "oxid x%x: %d %d\n",
608                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
609         }
610         ctxp->state = LPFC_NVMET_STE_LS_RSP;
611         ctxp->entry_cnt++;
612
613         nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
614                                       rsp->rsplen);
615         if (nvmewqeq == NULL) {
616                 atomic_inc(&nvmep->xmt_ls_drop);
617                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
618                                 "6150 LS Drop IO x%x: Prep\n",
619                                 ctxp->oxid);
620                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
621                 atomic_inc(&nvmep->xmt_ls_abort);
622                 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
623                                                 ctxp->sid, ctxp->oxid);
624                 return -ENOMEM;
625         }
626
627         /* Save numBdes for bpl2sgl */
628         nvmewqeq->rsvd2 = 1;
629         nvmewqeq->hba_wqidx = 0;
630         nvmewqeq->context3 = &dmabuf;
631         dmabuf.virt = &bpl;
632         bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
633         bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
634         bpl.tus.f.bdeSize = rsp->rsplen;
635         bpl.tus.f.bdeFlags = 0;
636         bpl.tus.w = le32_to_cpu(bpl.tus.w);
637
638         nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
639         nvmewqeq->iocb_cmpl = NULL;
640         nvmewqeq->context2 = ctxp;
641
642         lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
643                          ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
644
645         rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
646         if (rc == WQE_SUCCESS) {
647                 /*
648                  * Okay to repost buffer here, but wait till cmpl
649                  * before freeing ctxp and iocbq.
650                  */
651                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
652                 ctxp->rqb_buffer = NULL;
653                 atomic_inc(&nvmep->xmt_ls_rsp);
654                 return 0;
655         }
656         /* Give back resources */
657         atomic_inc(&nvmep->xmt_ls_drop);
658         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
659                         "6151 LS Drop IO x%x: Issue %d\n",
660                         ctxp->oxid, rc);
661
662         lpfc_nlp_put(nvmewqeq->context1);
663
664         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
665         atomic_inc(&nvmep->xmt_ls_abort);
666         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
667         return -ENXIO;
668 }
669
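/**
 * lpfc_nvmet_xmt_fcp_op - Issue an NVME FCP operation (nvmet_fc callback)
 * @tgtport: Pointer to the NVME transport target port.
 * @rsp: Pointer to the transport FCP request describing the operation.
 *
 * Prepares a WQE for the requested data transfer or response operation and
 * posts it to an FCP WQ; lpfc_nvmet_xmt_fcp_op_cmp handles completion.
 * Returns 0 on success, -ENXIO if the exchange is already being aborted or
 * WQE preparation fails, and -EBUSY if the WQE cannot be issued.
 **/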
670 static int
671 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
672                       struct nvmefc_tgt_fcp_req *rsp)
673 {
674         struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
675         struct lpfc_nvmet_rcv_ctx *ctxp =
676                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
677         struct lpfc_hba *phba = ctxp->phba;
678         struct lpfc_iocbq *nvmewqeq;
679         int rc;
680
681 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
682         if (phba->ktime_on) {
683                 if (rsp->op == NVMET_FCOP_RSP)
684                         ctxp->ts_nvme_status = ktime_get_ns();
685                 else
686                         ctxp->ts_nvme_data = ktime_get_ns();
687         }
688         if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
689                 int id = smp_processor_id();
690                 ctxp->cpu = id;
691                 if (id < LPFC_CHECK_CPU_CNT)
692                         phba->cpucheck_xmt_io[id]++;
693                 if (rsp->hwqid != id) {
694                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
695                                         "6705 CPU Check OP: "
696                                         "cpu %d expect %d\n",
697                                         id, rsp->hwqid);
698                         ctxp->cpu = rsp->hwqid;
699                 }
700         }
701 #endif
702
703         /* Sanity check */
704         if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
705             (ctxp->state == LPFC_NVMET_STE_ABORT)) {
706                 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
707                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
708                                 "6102 IO xri x%x aborted\n",
709                                 ctxp->oxid);
710                 rc = -ENXIO;
711                 goto aerr;
712         }
713
714         nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
715         if (nvmewqeq == NULL) {
716                 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
717                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
718                                 "6152 FCP Drop IO x%x: Prep\n",
719                                 ctxp->oxid);
720                 rc = -ENXIO;
721                 goto aerr;
722         }
723
724         nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
725         nvmewqeq->iocb_cmpl = NULL;
726         nvmewqeq->context2 = ctxp;
727         nvmewqeq->iocb_flag |=  LPFC_IO_NVMET;
728         ctxp->wqeq->hba_wqidx = rsp->hwqid;
729
730         lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
731                          ctxp->oxid, rsp->op, rsp->rsplen);
732
733         ctxp->flag |= LPFC_NVMET_IO_INP;
734         rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
735         if (rc == WQE_SUCCESS) {
736 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
737                 if (!phba->ktime_on)
738                         return 0;
739                 if (rsp->op == NVMET_FCOP_RSP)
740                         ctxp->ts_status_wqput = ktime_get_ns();
741                 else
742                         ctxp->ts_data_wqput = ktime_get_ns();
743 #endif
744                 return 0;
745         }
746
747         /* Give back resources */
748         atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
749         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
750                         "6153 FCP Drop IO x%x: Issue: %d\n",
751                         ctxp->oxid, rc);
752
753         ctxp->wqeq->hba_wqidx = 0;
754         nvmewqeq->context2 = NULL;
755         nvmewqeq->context3 = NULL;
756         rc = -EBUSY;
757 aerr:
758         return rc;
759 }
760
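/**
 * lpfc_nvmet_targetport_delete - Target port delete callback from nvmet_fc
 * @targetport: Pointer to the NVME transport target port being deleted.
 *
 * Completes tport_unreg_done so lpfc_nvmet_destroy_targetport, which waits
 * on it after calling nvmet_fc_unregister_targetport, can continue.
 **/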
761 static void
762 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
763 {
764         struct lpfc_nvmet_tgtport *tport = targetport->private;
765
766         /* release any threads waiting for the unreg to complete */
767         complete(&tport->tport_unreg_done);
768 }
769
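/**
 * lpfc_nvmet_xmt_fcp_abort - Abort an NVME FCP exchange (nvmet_fc callback)
 * @tgtport: Pointer to the NVME transport target port.
 * @req: Pointer to the transport FCP request being aborted.
 *
 * Marks the context with LPFC_NVMET_ABORT_OP and issues an unsolicited or
 * solicited abort depending on whether any IO WQEs have been issued on the
 * exchange.  If the firmware already reported the exchange busy (XBUSY),
 * the in-progress firmware cleanup is left to finish the abort.
 **/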
770 static void
771 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
772                          struct nvmefc_tgt_fcp_req *req)
773 {
774         struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
775         struct lpfc_nvmet_rcv_ctx *ctxp =
776                 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
777         struct lpfc_hba *phba = ctxp->phba;
778         unsigned long flags;
779
780         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
781                         "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
782                         ctxp->oxid, ctxp->flag, ctxp->state);
783
784         lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
785                          ctxp->oxid, ctxp->flag, ctxp->state);
786
787         atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
788
789         spin_lock_irqsave(&ctxp->ctxlock, flags);
790
791         /* Since iaab/iaar are NOT set, we need to check
792          * if the firmware is in the process of aborting this IO
793          */
794         if (ctxp->flag & LPFC_NVMET_XBUSY) {
795                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
796                 return;
797         }
798         ctxp->flag |= LPFC_NVMET_ABORT_OP;
799
800         /* A state of LPFC_NVMET_STE_RCV means we have just received
801          * the NVME command and have not yet started processing it
802          * (no IO WQEs have been issued on this exchange).
803          */
804         if (ctxp->state == LPFC_NVMET_STE_RCV)
805                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
806                                                  ctxp->oxid);
807         else
808                 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
809                                                ctxp->oxid);
810         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
811 }
812
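/**
 * lpfc_nvmet_xmt_fcp_release - Release an FCP exchange (nvmet_fc callback)
 * @tgtport: Pointer to the NVME transport target port.
 * @rsp: Pointer to the transport FCP request being released.
 *
 * Called when the transport is finished with the IO.  If an abort is in
 * progress or the exchange is still busy in firmware, the release is
 * deferred to the abort path; otherwise the receive context is returned
 * to the free list via lpfc_nvmet_ctxbuf_post.
 **/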
813 static void
814 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
815                            struct nvmefc_tgt_fcp_req *rsp)
816 {
817         struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
818         struct lpfc_nvmet_rcv_ctx *ctxp =
819                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
820         struct lpfc_hba *phba = ctxp->phba;
821         unsigned long flags;
822         bool aborting = false;
823
824         if (ctxp->state != LPFC_NVMET_STE_DONE &&
825             ctxp->state != LPFC_NVMET_STE_ABORT) {
826                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
827                                 "6413 NVMET release bad state %d %d oxid x%x\n",
828                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
829         }
830
831         spin_lock_irqsave(&ctxp->ctxlock, flags);
832         if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
833             (ctxp->flag & LPFC_NVMET_XBUSY)) {
834                 aborting = true;
835                 /* let the abort path do the real release */
836                 lpfc_nvmet_defer_release(phba, ctxp);
837         }
838         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
839
840         lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
841                          ctxp->oxid, ctxp->state, aborting);
842
843         atomic_inc(&lpfc_nvmep->xmt_fcp_release);
844
845         if (aborting)
846                 return;
847
848         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
849 }
850
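/**
 * lpfc_nvmet_defer_rcv - Deferred receive callback from nvmet_fc
 * @tgtport: Pointer to the NVME transport target port.
 * @rsp: Pointer to the transport FCP request whose receive was deferred.
 *
 * Invoked once the transport no longer needs the payload of a command it
 * previously asked the driver to hold; the RQ buffer that carried the
 * command is reposted so the hardware can reuse it.
 **/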
851 static void
852 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
853                      struct nvmefc_tgt_fcp_req *rsp)
854 {
855         struct lpfc_nvmet_tgtport *tgtp;
856         struct lpfc_nvmet_rcv_ctx *ctxp =
857                 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
858         struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
859         struct lpfc_hba *phba = ctxp->phba;
860
861         lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
862                          ctxp->oxid, ctxp->size, smp_processor_id());
863
864         tgtp = phba->targetport->private;
865         atomic_inc(&tgtp->rcv_fcp_cmd_defer);
866         lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
867 }
868
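/* Entry points and limits reported to the NVME target transport when this
 * driver registers a target port (see lpfc_nvmet_create_targetport).
 */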
869 static struct nvmet_fc_target_template lpfc_tgttemplate = {
870         .targetport_delete = lpfc_nvmet_targetport_delete,
871         .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
872         .fcp_op         = lpfc_nvmet_xmt_fcp_op,
873         .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
874         .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
875         .defer_rcv      = lpfc_nvmet_defer_rcv,
876
877         .max_hw_queues  = 1,
878         .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
879         .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
880         .dma_boundary = 0xFFFFFFFF,
881
882         /* optional features */
883         .target_features = 0,
884         /* sizes of additional private data for data structures */
885         .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
886 };
887
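/**
 * __lpfc_nvmet_clean_io_for_cpu - Free all receive contexts on one list
 * @phba: Pointer to HBA context object.
 * @infop: Pointer to the per-CPU/MRQ context list to drain.
 *
 * Walks the context list, returning each context's SGL to the NVMET SGL
 * list, releasing its iocbq and freeing the context memory.
 **/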
888 static void
889 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
890                 struct lpfc_nvmet_ctx_info *infop)
891 {
892         struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
893         unsigned long flags;
894
895         spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
896         list_for_each_entry_safe(ctx_buf, next_ctx_buf,
897                                 &infop->nvmet_ctx_list, list) {
898                 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
899                 list_del_init(&ctx_buf->list);
900                 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
901
902                 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
903                 ctx_buf->sglq->state = SGL_FREED;
904                 ctx_buf->sglq->ndlp = NULL;
905
906                 spin_lock(&phba->sli4_hba.sgl_list_lock);
907                 list_add_tail(&ctx_buf->sglq->list,
908                                 &phba->sli4_hba.lpfc_nvmet_sgl_list);
909                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
910
911                 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
912                 kfree(ctx_buf->context);
913         }
914         spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
915 }
916
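/**
 * lpfc_nvmet_cleanup_io_context - Free all NVMET receive context resources
 * @phba: Pointer to HBA context object.
 *
 * Drains every per-CPU/MRQ context list and frees the context info array
 * allocated by lpfc_nvmet_setup_io_context.
 **/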
917 static void
918 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
919 {
920         struct lpfc_nvmet_ctx_info *infop;
921         int i, j;
922
923         /* The first context list, MRQ 0 CPU 0 */
924         infop = phba->sli4_hba.nvmet_ctx_info;
925         if (!infop)
926                 return;
927
928         /* Cycle the entire CPU context list for every MRQ */
929         for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
930                 for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
931                         __lpfc_nvmet_clean_io_for_cpu(phba, infop);
932                         infop++; /* next */
933                 }
934         }
935         kfree(phba->sli4_hba.nvmet_ctx_info);
936         phba->sli4_hba.nvmet_ctx_info = NULL;
937 }
938
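/**
 * lpfc_nvmet_setup_io_context - Allocate NVMET receive context resources
 * @phba: Pointer to HBA context object.
 *
 * Allocates the per-CPU/MRQ context info array and, for every NVMET XRI,
 * a context buffer with its iocbq and SGL, preinitializes the WQE, and
 * spreads the context buffers evenly across the MRQ context lists.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 **/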
939 static int
940 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
941 {
942         struct lpfc_nvmet_ctxbuf *ctx_buf;
943         struct lpfc_iocbq *nvmewqe;
944         union lpfc_wqe128 *wqe;
945         struct lpfc_nvmet_ctx_info *last_infop;
946         struct lpfc_nvmet_ctx_info *infop;
947         int i, j, idx;
948
949         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
950                         "6403 Allocate NVMET resources for %d XRIs\n",
951                         phba->sli4_hba.nvmet_xri_cnt);
952
953         phba->sli4_hba.nvmet_ctx_info = kcalloc(
954                 phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
955                 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
956         if (!phba->sli4_hba.nvmet_ctx_info) {
957                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
958                                 "6419 Failed allocate memory for "
959                                 "nvmet context lists\n");
960                 return -ENOMEM;
961         }
962
963         /*
964          * Assuming X CPUs in the system, and Y MRQs, allocate some
965          * lpfc_nvmet_ctx_info structures as follows:
966          *
967          * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
968          * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
969          * ...
970          * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
971          *
972          * Each line represents an MRQ "silo" containing an entry for
973          * every CPU.
974          *
975          * MRQ X is initially assumed to be associated with CPU X, thus
976          * contexts are initially distributed across all MRQs using
977          * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
978          * freed, they are freed to the MRQ silo based on the CPU number
979          * of the IO completion. Thus a context that was allocated for MRQ A
980          * whose IO completed on CPU B will be freed to cpuB/mrqA.
981          */
982         infop = phba->sli4_hba.nvmet_ctx_info;
983         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
984                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
985                         INIT_LIST_HEAD(&infop->nvmet_ctx_list);
986                         spin_lock_init(&infop->nvmet_ctx_list_lock);
987                         infop->nvmet_ctx_list_cnt = 0;
988                         infop++;
989                 }
990         }
991
992         /*
993          * Setup the next CPU context info ptr for each MRQ.
994          * MRQ 0 will cycle thru CPUs 0 - X separately from
995          * MRQ 1 cycling thru CPUs 0 - X, and so on.
996          */
997         for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
998                 last_infop = lpfc_get_ctx_list(phba, 0, j);
999                 for (i = phba->sli4_hba.num_present_cpu - 1;  i >= 0; i--) {
1000                         infop = lpfc_get_ctx_list(phba, i, j);
1001                         infop->nvmet_ctx_next_cpu = last_infop;
1002                         last_infop = infop;
1003                 }
1004         }
1005
1006         /* For all nvmet xris, allocate resources needed to process a
1007          * received command on a per xri basis.
1008          */
1009         idx = 0;
1010         for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1011                 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1012                 if (!ctx_buf) {
1013                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1014                                         "6404 Ran out of memory for NVMET\n");
1015                         return -ENOMEM;
1016                 }
1017
1018                 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1019                                            GFP_KERNEL);
1020                 if (!ctx_buf->context) {
1021                         kfree(ctx_buf);
1022                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1023                                         "6405 Ran out of NVMET "
1024                                         "context memory\n");
1025                         return -ENOMEM;
1026                 }
1027                 ctx_buf->context->ctxbuf = ctx_buf;
1028                 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1029
1030                 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1031                 if (!ctx_buf->iocbq) {
1032                         kfree(ctx_buf->context);
1033                         kfree(ctx_buf);
1034                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1035                                         "6406 Ran out of NVMET iocb/WQEs\n");
1036                         return -ENOMEM;
1037                 }
1038                 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1039                 nvmewqe = ctx_buf->iocbq;
1040                 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1041                 /* Initialize WQE */
1042                 memset(wqe, 0, sizeof(union lpfc_wqe));
1043                 /* Word 7 */
1044                 bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
1045                 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
1046                 /* Word 10 */
1047                 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1048                 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
1049                 bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
1050
1051                 ctx_buf->iocbq->context1 = NULL;
1052                 spin_lock(&phba->sli4_hba.sgl_list_lock);
1053                 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1054                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1055                 if (!ctx_buf->sglq) {
1056                         lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1057                         kfree(ctx_buf->context);
1058                         kfree(ctx_buf);
1059                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1060                                         "6407 Ran out of NVMET XRIs\n");
1061                         return -ENOMEM;
1062                 }
1063
1064                 /*
1065                  * Add ctx to MRQidx context list. Our initial assumption
1066                  * is MRQidx will be associated with CPUidx. This association
1067                  * can change on the fly.
1068                  */
1069                 infop = lpfc_get_ctx_list(phba, idx, idx);
1070                 spin_lock(&infop->nvmet_ctx_list_lock);
1071                 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1072                 infop->nvmet_ctx_list_cnt++;
1073                 spin_unlock(&infop->nvmet_ctx_list_lock);
1074
1075                 /* Spread ctx structures evenly across all MRQs */
1076                 idx++;
1077                 if (idx >= phba->cfg_nvmet_mrq)
1078                         idx = 0;
1079         }
1080
1081         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1082                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1083                         infop = lpfc_get_ctx_list(phba, i, j);
1084                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1085                                         "6408 TOTAL NVMET ctx for CPU %d "
1086                                         "MRQ %d: cnt %d nextcpu %p\n",
1087                                         i, j, infop->nvmet_ctx_list_cnt,
1088                                         infop->nvmet_ctx_next_cpu);
1089                 }
1090         }
1091         return 0;
1092 }
1093
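/**
 * lpfc_nvmet_create_targetport - Register the port as an NVME target port
 * @phba: Pointer to HBA context object.
 *
 * Sets up the NVMET receive contexts, fills in the FC port info and the
 * target template limits, and registers with the NVME transport through
 * nvmet_fc_register_targetport.  On success the per-port statistic
 * counters are zeroed; on failure the context resources are freed and
 * nvmet_support is cleared.
 *
 * Returns 0 on success or a negative error code.
 **/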
1094 int
1095 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1096 {
1097         struct lpfc_vport  *vport = phba->pport;
1098         struct lpfc_nvmet_tgtport *tgtp;
1099         struct nvmet_fc_port_info pinfo;
1100         int error;
1101
1102         if (phba->targetport)
1103                 return 0;
1104
1105         error = lpfc_nvmet_setup_io_context(phba);
1106         if (error)
1107                 return error;
1108
1109         memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1110         pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1111         pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1112         pinfo.port_id = vport->fc_myDID;
1113
1114         /* Limit to LPFC_MAX_NVME_SEG_CNT.
1115          * For now we need the + 1 to work around NVME transport logic.
1116          */
1117         if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
1118                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1119                                 "6400 Reducing sg segment cnt to %d\n",
1120                                 LPFC_MAX_NVME_SEG_CNT);
1121                 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
1122         } else {
1123                 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
1124         }
1125         lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1126         lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
1127         lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
1128                                            NVMET_FCTGTFEAT_CMD_IN_ISR |
1129                                            NVMET_FCTGTFEAT_OPDONE_IN_ISR;
1130
1131 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1132         error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1133                                              &phba->pcidev->dev,
1134                                              &phba->targetport);
1135 #else
1136         error = -ENOENT;
1137 #endif
1138         if (error) {
1139                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1140                                 "6025 Cannot register NVME targetport x%x: "
1141                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1142                                 error,
1143                                 pinfo.port_name, pinfo.node_name,
1144                                 lpfc_tgttemplate.max_sgl_segments,
1145                                 lpfc_tgttemplate.max_hw_queues);
1146                 phba->targetport = NULL;
1147                 phba->nvmet_support = 0;
1148
1149                 lpfc_nvmet_cleanup_io_context(phba);
1150
1151         } else {
1152                 tgtp = (struct lpfc_nvmet_tgtport *)
1153                         phba->targetport->private;
1154                 tgtp->phba = phba;
1155
1156                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1157                                 "6026 Registered NVME "
1158                                 "targetport: %p, private %p "
1159                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1160                                 phba->targetport, tgtp,
1161                                 pinfo.port_name, pinfo.node_name,
1162                                 lpfc_tgttemplate.max_sgl_segments,
1163                                 lpfc_tgttemplate.max_hw_queues);
1164
1165                 atomic_set(&tgtp->rcv_ls_req_in, 0);
1166                 atomic_set(&tgtp->rcv_ls_req_out, 0);
1167                 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1168                 atomic_set(&tgtp->xmt_ls_abort, 0);
1169                 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1170                 atomic_set(&tgtp->xmt_ls_rsp, 0);
1171                 atomic_set(&tgtp->xmt_ls_drop, 0);
1172                 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1173                 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1174                 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1175                 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1176                 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1177                 atomic_set(&tgtp->xmt_fcp_drop, 0);
1178                 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1179                 atomic_set(&tgtp->xmt_fcp_read, 0);
1180                 atomic_set(&tgtp->xmt_fcp_write, 0);
1181                 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1182                 atomic_set(&tgtp->xmt_fcp_release, 0);
1183                 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1184                 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1185                 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1186                 atomic_set(&tgtp->xmt_fcp_abort, 0);
1187                 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1188                 atomic_set(&tgtp->xmt_abort_unsol, 0);
1189                 atomic_set(&tgtp->xmt_abort_sol, 0);
1190                 atomic_set(&tgtp->xmt_abort_rsp, 0);
1191                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1192         }
1193         return error;
1194 }
1195
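/**
 * lpfc_nvmet_update_targetport - Refresh the target port FC address
 * @phba: Pointer to HBA context object.
 *
 * Copies the physical port's current DID into the registered NVME target
 * port.  Returns 0.
 **/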
1196 int
1197 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1198 {
1199         struct lpfc_vport  *vport = phba->pport;
1200
1201         if (!phba->targetport)
1202                 return 0;
1203
1204         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1205                          "6007 Update NVMET port %p did x%x\n",
1206                          phba->targetport, vport->fc_myDID);
1207
1208         phba->targetport->port_id = vport->fc_myDID;
1209         return 0;
1210 }
1211
1212 /**
1213  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1214  * @phba: pointer to lpfc hba data structure.
1215  * @axri: pointer to the nvmet xri abort wcqe structure.
1216  *
1217  * This routine is invoked by the worker thread to process a SLI4 fast-path
1218  * NVMET aborted xri.
1219  **/
1220 void
1221 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1222                             struct sli4_wcqe_xri_aborted *axri)
1223 {
1224         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1225         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1226         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1227         struct lpfc_nodelist *ndlp;
1228         unsigned long iflag = 0;
1229         int rrq_empty = 0;
1230         bool released = false;
1231
1232         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1233                         "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1234
1235         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1236                 return;
1237         spin_lock_irqsave(&phba->hbalock, iflag);
1238         spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1239         list_for_each_entry_safe(ctxp, next_ctxp,
1240                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1241                                  list) {
1242                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1243                         continue;
1244
1245                 /* Check if we already received a free context call
1246                  * and we have completed processing an abort situation.
1247                  */
1248                 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1249                     !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1250                         list_del(&ctxp->list);
1251                         released = true;
1252                 }
1253                 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1254                 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1255
1256                 rrq_empty = list_empty(&phba->active_rrq_list);
1257                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1258                 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1259                 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1260                     (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1261                      ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1262                         lpfc_set_rrq_active(phba, ndlp,
1263                                 ctxp->ctxbuf->sglq->sli4_lxritag,
1264                                 rxid, 1);
1265                         lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1266                 }
1267
1268                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1269                                 "6318 XB aborted oxid %x flg x%x (%x)\n",
1270                                 ctxp->oxid, ctxp->flag, released);
1271                 if (released)
1272                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1273
1274                 if (rrq_empty)
1275                         lpfc_worker_wake_up(phba);
1276                 return;
1277         }
1278         spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1279         spin_unlock_irqrestore(&phba->hbalock, iflag);
1280 }
1281
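/**
 * lpfc_nvmet_rcv_unsol_abort - Process an unsolicited ABTS from an initiator
 * @vport: pointer to the lpfc vport the ABTS was received on.
 * @fc_hdr: pointer to the FC frame header of the ABTS.
 *
 * Walk the aborted-context list for an exchange whose XRI matches the OX_ID
 * carried in the ABTS header. If one is found, mark the context as having
 * received an ABTS, notify the nvmet-fc transport via
 * nvmet_fc_rcv_fcp_abort(), and respond with BA_ACC; otherwise respond
 * with BA_RJT.
 **/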
1282 int
1283 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1284                            struct fc_frame_header *fc_hdr)
1285
1286 {
1287 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1288         struct lpfc_hba *phba = vport->phba;
1289         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1290         struct nvmefc_tgt_fcp_req *rsp;
1291         uint16_t xri;
1292         unsigned long iflag = 0;
1293
1294         xri = be16_to_cpu(fc_hdr->fh_ox_id);
1295
1296         spin_lock_irqsave(&phba->hbalock, iflag);
1297         spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1298         list_for_each_entry_safe(ctxp, next_ctxp,
1299                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1300                                  list) {
1301                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1302                         continue;
1303
1304                 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1305                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1306
1307                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1308                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1309                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1310
1311                 lpfc_nvmeio_data(phba,
1312                         "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1313                         xri, smp_processor_id(), 0);
1314
1315                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1316                                 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1317
1318                 rsp = &ctxp->ctx.fcp_req;
1319                 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1320
1321                 /* Respond with BA_ACC accordingly */
1322                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1323                 return 0;
1324         }
1325         spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1326         spin_unlock_irqrestore(&phba->hbalock, iflag);
1327
1328         lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1329                          xri, smp_processor_id(), 1);
1330
1331         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1332                         "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1333
1334         /* Respond with BA_RJT accordingly */
1335         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1336 #endif
1337         return 0;
1338 }
1339
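/**
 * lpfc_nvmet_destroy_targetport - Unregister the NVMET target port
 * @phba: pointer to lpfc hba data structure.
 *
 * If NVMET support is enabled and a target port is registered, unregister
 * it from the nvmet-fc transport, wait (with a timeout) for the unregister
 * to complete, and release the NVMET IO context resources.
 **/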
1340 void
1341 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1342 {
1343 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1344         struct lpfc_nvmet_tgtport *tgtp;
1345
1346         if (phba->nvmet_support == 0)
1347                 return;
1348         if (phba->targetport) {
1349                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1350                 init_completion(&tgtp->tport_unreg_done);
1351                 nvmet_fc_unregister_targetport(phba->targetport);
1352                 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
1353                 lpfc_nvmet_cleanup_io_context(phba);
1354         }
1355         phba->targetport = NULL;
1356 #endif
1357 }
1358
1359 /**
1360  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1361  * @phba: pointer to lpfc hba data structure.
1362  * @pring: pointer to a SLI ring.
1363  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1364  *
1365  * This routine processes an unsolicited NVME LS request received in an
1366  * HBQ buffer. It validates the receive buffer and target port, allocates
1367  * a receive context for the exchange, and passes the LS payload to the
1368  * nvmet-fc transport via nvmet_fc_rcv_ls_req(). If the transport rejects
1369  * the request, the receive buffer is freed and an abort is issued for
1370  * the exchange.
1371  **/
1372 static void
1373 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1374                            struct hbq_dmabuf *nvmebuf)
1375 {
1376 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1377         struct lpfc_nvmet_tgtport *tgtp;
1378         struct fc_frame_header *fc_hdr;
1379         struct lpfc_nvmet_rcv_ctx *ctxp;
1380         uint32_t *payload;
1381         uint32_t size, oxid, sid, rc;
1382
1383         if (!nvmebuf || !phba->targetport) {
1384                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1385                                 "6154 LS Drop IO\n");
1386                 oxid = 0;
1387                 size = 0;
1388                 sid = 0;
1389                 ctxp = NULL;
1390                 goto dropit;
1391         }
1392
1393         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1394         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1395         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1396         size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1397         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1398         sid = sli4_sid_from_fc_hdr(fc_hdr);
1399
1400         ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1401         if (ctxp == NULL) {
1402                 atomic_inc(&tgtp->rcv_ls_req_drop);
1403                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1404                                 "6155 LS Drop IO x%x: Alloc\n",
1405                                 oxid);
1406 dropit:
1407                 lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1408                                  "xri x%x sz %d from %06x\n",
1409                                  oxid, size, sid);
1410                 if (nvmebuf)
1411                         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1412                 return;
1413         }
1414         ctxp->phba = phba;
1415         ctxp->size = size;
1416         ctxp->oxid = oxid;
1417         ctxp->sid = sid;
1418         ctxp->wqeq = NULL;
1419         ctxp->state = LPFC_NVMET_STE_LS_RCV;
1420         ctxp->entry_cnt = 1;
1421         ctxp->rqb_buffer = (void *)nvmebuf;
1422
1423         lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1424                          oxid, size, sid);
1425         /*
1426          * The calling sequence should be:
1427          * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1428          * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1429          */
1430         atomic_inc(&tgtp->rcv_ls_req_in);
1431         rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1432                                  payload, size);
1433
1434         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1435                         "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1436                         "%08x %08x %08x\n", size, rc,
1437                         *payload, *(payload+1), *(payload+2),
1438                         *(payload+3), *(payload+4), *(payload+5));
1439
1440         if (rc == 0) {
1441                 atomic_inc(&tgtp->rcv_ls_req_out);
1442                 return;
1443         }
1444
1445         lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1446                          oxid, size, sid);
1447
1448         atomic_inc(&tgtp->rcv_ls_req_drop);
1449         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1450                         "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1451                         ctxp->oxid, rc);
1452
1453         /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1454         if (nvmebuf)
1455                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1456
1457         atomic_inc(&tgtp->xmt_ls_abort);
1458         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1459 #endif
1460 }
1461
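/**
 * lpfc_nvmet_replenish_context - Refill an MRQ's empty receive context list
 * @phba: pointer to lpfc hba data structure.
 * @current_infop: per-CPU context list info for the MRQ that ran empty.
 *
 * Scan the other CPUs' context lists for this MRQ. When one with available
 * contexts is found, splice its entire list onto @current_infop, remember
 * that CPU as the starting point for the next replenish, and return one
 * context buffer from the spliced list. Returns NULL when every list is
 * empty, i.e. all contexts for this MRQ are in flight.
 **/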
1462 static struct lpfc_nvmet_ctxbuf *
1463 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1464                              struct lpfc_nvmet_ctx_info *current_infop)
1465 {
1466 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1467         struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1468         struct lpfc_nvmet_ctx_info *get_infop;
1469         int i;
1470
1471         /*
1472          * The current_infop for the MRQ an NVME command IU was received
1473          * on is empty. Our goal is to replenish this MRQ's context list
1474          * from another CPU's list.
1475          *
1476          * First we need to pick a context list to start looking on.
1477          * nvmet_ctx_start_cpu had an available context the last time
1478          * we needed to replenish this CPU, whereas nvmet_ctx_next_cpu
1479          * is simply the next sequential CPU for this MRQ.
1480          */
1481         if (current_infop->nvmet_ctx_start_cpu)
1482                 get_infop = current_infop->nvmet_ctx_start_cpu;
1483         else
1484                 get_infop = current_infop->nvmet_ctx_next_cpu;
1485
1486         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1487                 if (get_infop == current_infop) {
1488                         get_infop = get_infop->nvmet_ctx_next_cpu;
1489                         continue;
1490                 }
1491                 spin_lock(&get_infop->nvmet_ctx_list_lock);
1492
1493                 /* Just take the entire context list, if there are any */
1494                 if (get_infop->nvmet_ctx_list_cnt) {
1495                         list_splice_init(&get_infop->nvmet_ctx_list,
1496                                     &current_infop->nvmet_ctx_list);
1497                         current_infop->nvmet_ctx_list_cnt =
1498                                 get_infop->nvmet_ctx_list_cnt - 1;
1499                         get_infop->nvmet_ctx_list_cnt = 0;
1500                         spin_unlock(&get_infop->nvmet_ctx_list_lock);
1501
1502                         current_infop->nvmet_ctx_start_cpu = get_infop;
1503                         list_remove_head(&current_infop->nvmet_ctx_list,
1504                                          ctx_buf, struct lpfc_nvmet_ctxbuf,
1505                                          list);
1506                         return ctx_buf;
1507                 }
1508
1509                 /* Otherwise, move on to the next CPU for this MRQ */
1510                 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1511                 get_infop = get_infop->nvmet_ctx_next_cpu;
1512         }
1513
1514 #endif
1515         /* Nothing found, all contexts for the MRQ are in-flight */
1516         return NULL;
1517 }
1518
1519 /**
1520  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1521  * @phba: pointer to lpfc hba data structure.
1522  * @idx: relative index of MRQ vector
1523  * @nvmebuf: pointer to the lpfc nvme command RQ data structure.
1524  * @isr_timestamp: timestamp taken at interrupt time, for IO latency stats.
1525  *
1526  * This routine processes an unsolicited NVME FCP command IU. It obtains a
1527  * receive context buffer from the per-CPU list for this MRQ (replenishing
1528  * from another CPU, or queueing the IO for later processing, if the list
1529  * is empty), initializes the receive context, and hands the command to
1530  * the nvmet-fc transport via nvmet_fc_rcv_fcp_req().
1531  **/
1532 static void
1533 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1534                             uint32_t idx,
1535                             struct rqb_dmabuf *nvmebuf,
1536                             uint64_t isr_timestamp)
1537 {
1538         struct lpfc_nvmet_rcv_ctx *ctxp;
1539         struct lpfc_nvmet_tgtport *tgtp;
1540         struct fc_frame_header *fc_hdr;
1541         struct lpfc_nvmet_ctxbuf *ctx_buf;
1542         struct lpfc_nvmet_ctx_info *current_infop;
1543         uint32_t *payload;
1544         uint32_t size, oxid, sid, rc, qno;
1545         unsigned long iflag;
1546         int current_cpu;
1547 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1548         uint32_t id;
1549 #endif
1550
1551         if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
1552                 return;
1553
1554         ctx_buf = NULL;
1555         if (!nvmebuf || !phba->targetport) {
1556                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1557                                 "6157 NVMET FCP Drop IO\n");
1558                 oxid = 0;
1559                 size = 0;
1560                 sid = 0;
1561                 ctxp = NULL;
1562                 goto dropit;
1563         }
1564
1565         /*
1566          * Get a pointer to the context list for this MRQ based on
1567          * the CPU this MRQ IRQ is associated with. If the CPU association
1568          * changes from our initial assumption, the context list could
1569          * be empty, thus it would need to be replenished with the
1570          * context list from another CPU for this MRQ.
1571          */
1572         current_cpu = smp_processor_id();
1573         current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
1574         spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
1575         if (current_infop->nvmet_ctx_list_cnt) {
1576                 list_remove_head(&current_infop->nvmet_ctx_list,
1577                                  ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1578                 current_infop->nvmet_ctx_list_cnt--;
1579         } else {
1580                 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
1581         }
1582         spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
1583
1584         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1585         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1586         size = nvmebuf->bytes_recv;
1587
1588 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1589         if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1590                 id = smp_processor_id();
1591                 if (id < LPFC_CHECK_CPU_CNT)
1592                         phba->cpucheck_rcv_io[id]++;
1593         }
1594 #endif
1595
1596         lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
1597                          oxid, size, smp_processor_id());
1598
1599         if (!ctx_buf) {
1600                 /* Queue this NVME IO to process later */
1601                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1602                 list_add_tail(&nvmebuf->hbuf.list,
1603                               &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1604                 phba->sli4_hba.nvmet_io_wait_cnt++;
1605                 phba->sli4_hba.nvmet_io_wait_total++;
1606                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1607                                        iflag);
1608
1609                 /* Post a brand new DMA buffer to RQ */
1610                 qno = nvmebuf->idx;
1611                 lpfc_post_rq_buffer(
1612                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1613                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1614                 return;
1615         }
1616
1617         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1618         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1619         sid = sli4_sid_from_fc_hdr(fc_hdr);
1620
1621         ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1622         if (ctxp->state != LPFC_NVMET_STE_FREE) {
1623                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1624                                 "6414 NVMET Context corrupt %d %d oxid x%x\n",
1625                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1626         }
1627         ctxp->wqeq = NULL;
1628         ctxp->txrdy = NULL;
1629         ctxp->offset = 0;
1630         ctxp->phba = phba;
1631         ctxp->size = size;
1632         ctxp->oxid = oxid;
1633         ctxp->sid = sid;
1634         ctxp->idx = idx;
1635         ctxp->state = LPFC_NVMET_STE_RCV;
1636         ctxp->entry_cnt = 1;
1637         ctxp->flag = 0;
1638         ctxp->ctxbuf = ctx_buf;
1639         spin_lock_init(&ctxp->ctxlock);
1640
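        /*
         * Record the ISR and driver entry timestamps used for the
         * debugfs IO latency statistics.
         */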
1641 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1642         if (phba->ktime_on) {
1643                 ctxp->ts_isr_cmd = isr_timestamp;
1644                 ctxp->ts_cmd_nvme = ktime_get_ns();
1645                 ctxp->ts_nvme_data = 0;
1646                 ctxp->ts_data_wqput = 0;
1647                 ctxp->ts_isr_data = 0;
1648                 ctxp->ts_data_nvme = 0;
1649                 ctxp->ts_nvme_status = 0;
1650                 ctxp->ts_status_wqput = 0;
1651                 ctxp->ts_isr_status = 0;
1652                 ctxp->ts_status_nvme = 0;
1653         }
1654 #endif
1655
1656         atomic_inc(&tgtp->rcv_fcp_cmd_in);
1657         /*
1658          * The calling sequence should be:
1659          * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
1660          * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1661          * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
1662          * the NVME command / FC header is stored, so we are free to repost
1663          * the buffer.
1664          */
1665         rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1666                                   payload, size);
1667
1668         /* Process FCP command */
1669         if (rc == 0) {
1670                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1671                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1672                 return;
1673         }
1674
1675         /* Processing of FCP command is deferred */
1676         if (rc == -EOVERFLOW) {
1677                 lpfc_nvmeio_data(phba,
1678                                  "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
1679                                  oxid, size, sid);
1680                 /* defer reposting rcv buffer till .defer_rcv callback */
1681                 ctxp->rqb_buffer = nvmebuf;
1682                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1683                 return;
1684         }
1685
1686         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1687         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1688                         "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
1689                         ctxp->oxid, rc,
1690                         atomic_read(&tgtp->rcv_fcp_cmd_in),
1691                         atomic_read(&tgtp->rcv_fcp_cmd_out),
1692                         atomic_read(&tgtp->xmt_fcp_release));
1693 dropit:
1694         lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1695                          oxid, size, sid);
1696         if (oxid) {
1697                 lpfc_nvmet_defer_release(phba, ctxp);
1698                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
1699                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1700                 return;
1701         }
1702
1703         if (ctx_buf)
1704                 lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
1705
1706         if (nvmebuf)
1707                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1708 }
1709
1710 /**
1711  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
1712  * @phba: pointer to lpfc hba data structure.
1713  * @pring: pointer to a SLI ring.
1714  * @piocb: pointer to the driver iocbq carrying the received buffer.
1715  *
1716  * This routine is used to process an unsolicited event received from an SLI
1717  * (Service Level Interface) ring. The actual processing of the data buffer
1718  * associated with the unsolicited event is done by invoking the routine
1719  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from
1720  * the SLI RQ on which the unsolicited event was received.
1721  **/
1722 void
1723 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1724                           struct lpfc_iocbq *piocb)
1725 {
1726         struct lpfc_dmabuf *d_buf;
1727         struct hbq_dmabuf *nvmebuf;
1728
1729         d_buf = piocb->context2;
1730         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1731
1732         if (phba->nvmet_support == 0) {
1733                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1734                 return;
1735         }
1736         lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
1737 }
1738
1739 /**
1740  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
1741  * @phba: pointer to lpfc hba data structure.
1742  * @idx: relative index of MRQ vector
1743  * @nvmebuf: pointer to received nvme data structure.
1744  * @isr_timestamp: timestamp taken at interrupt time, for IO latency stats.
1745  *
1746  * This routine is used to process an unsolicited event received from an SLI
1747  * (Service Level Interface) ring. The actual processing of the data buffer
1748  * is done by invoking lpfc_nvmet_unsol_fcp_buffer() after properly setting
1749  * up the buffer from the SLI RQ on which the unsolicited event was received.
1750  **/
1751 void
1752 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1753                            uint32_t idx,
1754                            struct rqb_dmabuf *nvmebuf,
1755                            uint64_t isr_timestamp)
1756 {
1757         if (phba->nvmet_support == 0) {
1758                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
1759                 return;
1760         }
1761         lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
1762                                     isr_timestamp);
1763 }
1764
1765 /**
1766  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
1767  * @phba: pointer to lpfc hba data structure.
1768  * @ctxp: Context info for NVME LS Request
1769  * @rspbuf: DMA address of the NVME LS response payload.
1770  * @rspsize: size of the NVME LS response payload.
1771  *
1772  * This routine allocates a lpfc-WQE data structure from the driver
1773  * lpfc-WQE free-list and prepares it to transmit an NVME LS response.
1774  * It looks up the remote node (ndlp) from the source ID of the original
1775  * LS request, fills in the Buffer Descriptor Entry (BDE) describing the
1776  * response payload at @rspbuf/@rspsize, and builds an XMIT_SEQUENCE64
1777  * WQE addressed to the originator's exchange (OX_ID). The reference
1778  * count on the ndlp is incremented by 1 and the reference to the ndlp
1779  * is put into context1 of the WQE data structure for this WQE to hold
1780  * the ndlp reference for the command's callback function to access
1781  * later.
1782  *
1783  * The caller is responsible for submitting the prepared WQE and for the
1784  * subsequent completion handling.
1785  *
1786  * Return code
1787  *   Pointer to the newly allocated/prepared nvme wqe data structure
1788  *   NULL - when nvme wqe data structure allocation/preparation failed
1789  **/
1790 static struct lpfc_iocbq *
1791 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
1792                        struct lpfc_nvmet_rcv_ctx *ctxp,
1793                        dma_addr_t rspbuf, uint16_t rspsize)
1794 {
1795         struct lpfc_nodelist *ndlp;
1796         struct lpfc_iocbq *nvmewqe;
1797         union lpfc_wqe *wqe;
1798
1799         if (!lpfc_is_link_up(phba)) {
1800                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1801                                 "6104 NVMET prep LS wqe: link err: "
1802                                 "NPORT x%x oxid:x%x ste %d\n",
1803                                 ctxp->sid, ctxp->oxid, ctxp->state);
1804                 return NULL;
1805         }
1806
1807         /* Allocate buffer for  command wqe */
1808         nvmewqe = lpfc_sli_get_iocbq(phba);
1809         if (nvmewqe == NULL) {
1810                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1811                                 "6105 NVMET prep LS wqe: No WQE: "
1812                                 "NPORT x%x oxid x%x ste %d\n",
1813                                 ctxp->sid, ctxp->oxid, ctxp->state);
1814                 return NULL;
1815         }
1816
1817         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1818         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1819             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1820             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1821                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1822                                 "6106 NVMET prep LS wqe: No ndlp: "
1823                                 "NPORT x%x oxid x%x ste %d\n",
1824                                 ctxp->sid, ctxp->oxid, ctxp->state);
1825                 goto nvme_wqe_free_wqeq_exit;
1826         }
1827         ctxp->wqeq = nvmewqe;
1828
1829         /* prevent preparing wqe with NULL ndlp reference */
1830         nvmewqe->context1 = lpfc_nlp_get(ndlp);
1831         if (nvmewqe->context1 == NULL)
1832                 goto nvme_wqe_free_wqeq_exit;
1833         nvmewqe->context2 = ctxp;
1834
1835         wqe = &nvmewqe->wqe;
1836         memset(wqe, 0, sizeof(union lpfc_wqe));
1837
1838         /* Words 0 - 2 */
1839         wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1840         wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
1841         wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
1842         wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
1843
1844         /* Word 3 */
1845
1846         /* Word 4 */
1847
1848         /* Word 5 */
1849         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
1850         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
1851         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
1852         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
1853         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
1854
1855         /* Word 6 */
1856         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
1857                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1858         bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
1859
1860         /* Word 7 */
1861         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
1862                CMD_XMIT_SEQUENCE64_WQE);
1863         bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
1864         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
1865         bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
1866
1867         /* Word 8 */
1868         wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
1869
1870         /* Word 9 */
1871         bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
1872         /* Needs to be set by caller */
1873         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
1874
1875         /* Word 10 */
1876         bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
1877         bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
1878         bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
1879                LPFC_WQE_LENLOC_WORD12);
1880         bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
1881
1882         /* Word 11 */
1883         bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
1884                LPFC_WQE_CQ_ID_DEFAULT);
1885         bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
1886                OTHER_COMMAND);
1887
1888         /* Word 12 */
1889         wqe->xmit_sequence.xmit_len = rspsize;
1890
1891         nvmewqe->retry = 1;
1892         nvmewqe->vport = phba->pport;
1893         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1894         nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
1895
1896         /* Xmit NVMET response to remote NPORT <did> */
1897         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1898                         "6039 Xmit NVMET LS response to remote "
1899                         "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
1900                         ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
1901                         rspsize);
1902         return nvmewqe;
1903
1904 nvme_wqe_free_wqeq_exit:
1905         nvmewqe->context2 = NULL;
1906         nvmewqe->context3 = NULL;
1907         lpfc_sli_release_iocbq(phba, nvmewqe);
1908         return NULL;
1909 }
1910
1911
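/**
 * lpfc_nvmet_prep_fcp_wqe - Prepare the WQE for an NVMET FCP operation
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context of the command being serviced.
 *
 * Build the WQE and SGL for the FCP operation requested by the nvmet-fc
 * transport in ctxp->ctx.fcp_req: a TSEND for read data (optionally with an
 * automatic or embedded response), a TRECEIVE with a TXRDY payload for
 * write data, or a TRSP for a response-only completion. Returns the
 * prepared iocbq, or NULL on error.
 **/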
1912 static struct lpfc_iocbq *
1913 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1914                         struct lpfc_nvmet_rcv_ctx *ctxp)
1915 {
1916         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
1917         struct lpfc_nvmet_tgtport *tgtp;
1918         struct sli4_sge *sgl;
1919         struct lpfc_nodelist *ndlp;
1920         struct lpfc_iocbq *nvmewqe;
1921         struct scatterlist *sgel;
1922         union lpfc_wqe128 *wqe;
1923         uint32_t *txrdy;
1924         dma_addr_t physaddr;
1925         int i, cnt;
1926         int xc = 1;
1927
1928         if (!lpfc_is_link_up(phba)) {
1929                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1930                                 "6107 NVMET prep FCP wqe: link err:"
1931                                 "NPORT x%x oxid x%x ste %d\n",
1932                                 ctxp->sid, ctxp->oxid, ctxp->state);
1933                 return NULL;
1934         }
1935
1936         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1937         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1938             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1939              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1940                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1941                                 "6108 NVMET prep FCP wqe: no ndlp: "
1942                                 "NPORT x%x oxid x%x ste %d\n",
1943                                 ctxp->sid, ctxp->oxid, ctxp->state);
1944                 return NULL;
1945         }
1946
1947         if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
1948                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1949                                 "6109 NVMET prep FCP wqe: seg cnt err: "
1950                                 "NPORT x%x oxid x%x ste %d cnt %d\n",
1951                                 ctxp->sid, ctxp->oxid, ctxp->state,
1952                                 phba->cfg_nvme_seg_cnt);
1953                 return NULL;
1954         }
1955
1956         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1957         nvmewqe = ctxp->wqeq;
1958         if (nvmewqe == NULL) {
1959                 /* Allocate buffer for  command wqe */
1960                 nvmewqe = ctxp->ctxbuf->iocbq;
1961                 if (nvmewqe == NULL) {
1962                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1963                                         "6110 NVMET prep FCP wqe: No "
1964                                         "WQE: NPORT x%x oxid x%x ste %d\n",
1965                                         ctxp->sid, ctxp->oxid, ctxp->state);
1966                         return NULL;
1967                 }
1968                 ctxp->wqeq = nvmewqe;
1969                 xc = 0; /* create new XRI */
1970                 nvmewqe->sli4_lxritag = NO_XRI;
1971                 nvmewqe->sli4_xritag = NO_XRI;
1972         }
1973
1974         /* Sanity check */
1975         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
1976             (ctxp->entry_cnt == 1)) ||
1977             (ctxp->state == LPFC_NVMET_STE_DATA)) {
1978                 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1979         } else {
1980                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1981                                 "6111 Wrong state NVMET FCP: %d  cnt %d\n",
1982                                 ctxp->state, ctxp->entry_cnt);
1983                 return NULL;
1984         }
1985
1986         sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
1987         switch (rsp->op) {
1988         case NVMET_FCOP_READDATA:
1989         case NVMET_FCOP_READDATA_RSP:
1990                 /* Words 0 - 2 : The first sg segment */
1991                 sgel = &rsp->sg[0];
1992                 physaddr = sg_dma_address(sgel);
1993                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1994                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
1995                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
1996                 wqe->fcp_tsend.bde.addrHigh =
1997                         cpu_to_le32(putPaddrHigh(physaddr));
1998
1999                 /* Word 3 */
2000                 wqe->fcp_tsend.payload_offset_len = 0;
2001
2002                 /* Word 4 */
2003                 wqe->fcp_tsend.relative_offset = ctxp->offset;
2004
2005                 /* Word 5 */
2006
2007                 /* Word 6 */
2008                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2009                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2010                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2011                        nvmewqe->sli4_xritag);
2012
2013                 /* Word 7 */
2014                 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
2015                 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
2016
2017                 /* Word 8 */
2018                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2019
2020                 /* Word 9 */
2021                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2022                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2023
2024                 /* Word 10 */
2025                 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
2026                 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
2027                 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
2028                 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
2029                        LPFC_WQE_LENLOC_WORD12);
2030                 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
2031                 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
2032                 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
2033                 if (phba->cfg_nvme_oas)
2034                         bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
2035
2036                 /* Word 11 */
2037                 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
2038                        LPFC_WQE_CQ_ID_DEFAULT);
2039                 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
2040                        FCP_COMMAND_TSEND);
2041
2042                 /* Word 12 */
2043                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2044
2045                 /* Setup 2 SKIP SGEs */
2046                 sgl->addr_hi = 0;
2047                 sgl->addr_lo = 0;
2048                 sgl->word2 = 0;
2049                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2050                 sgl->word2 = cpu_to_le32(sgl->word2);
2051                 sgl->sge_len = 0;
2052                 sgl++;
2053                 sgl->addr_hi = 0;
2054                 sgl->addr_lo = 0;
2055                 sgl->word2 = 0;
2056                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2057                 sgl->word2 = cpu_to_le32(sgl->word2);
2058                 sgl->sge_len = 0;
2059                 sgl++;
2060                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2061                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
2062                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
2063                         if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
2064                             (rsp->rsplen == 12)) {
2065                                 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
2066                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
2067                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
2068                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
2069                         } else {
2070                                 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2071                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2072                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2073                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2074                                        ((rsp->rsplen >> 2) - 1));
2075                                 memcpy(&wqe->words[16], rsp->rspaddr,
2076                                        rsp->rsplen);
2077                         }
2078                 } else {
2079                         atomic_inc(&tgtp->xmt_fcp_read);
2080
2081                         bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2082                         bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
2083                         bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
2084                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2085                         bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
2086                 }
2087                 break;
2088
2089         case NVMET_FCOP_WRITEDATA:
2090                 /* Words 0 - 2 : The first sg segment */
2091                 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2092                                        GFP_KERNEL, &physaddr);
2093                 if (!txrdy) {
2094                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2095                                         "6041 Bad txrdy buffer: oxid x%x\n",
2096                                         ctxp->oxid);
2097                         return NULL;
2098                 }
2099                 ctxp->txrdy = txrdy;
2100                 ctxp->txrdy_phys = physaddr;
2101                 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2102                 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2103                 wqe->fcp_treceive.bde.addrLow =
2104                         cpu_to_le32(putPaddrLow(physaddr));
2105                 wqe->fcp_treceive.bde.addrHigh =
2106                         cpu_to_le32(putPaddrHigh(physaddr));
2107
2108                 /* Word 3 */
2109                 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
2110
2111                 /* Word 4 */
2112                 wqe->fcp_treceive.relative_offset = ctxp->offset;
2113
2114                 /* Word 5 */
2115
2116                 /* Word 6 */
2117                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2118                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2119                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2120                        nvmewqe->sli4_xritag);
2121
2122                 /* Word 7 */
2123                 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
2124                 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
2125                 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
2126                        CMD_FCP_TRECEIVE64_WQE);
2127
2128                 /* Word 8 */
2129                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2130
2131                 /* Word 9 */
2132                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2133                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2134
2135                 /* Word 10 */
2136                 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
2137                 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
2138                 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
2139                 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
2140                        LPFC_WQE_LENLOC_WORD12);
2141                 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
2142                 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
2143                 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
2144                 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
2145                 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
2146                 if (phba->cfg_nvme_oas)
2147                         bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
2148
2149                 /* Word 11 */
2150                 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
2151                        LPFC_WQE_CQ_ID_DEFAULT);
2152                 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
2153                        FCP_COMMAND_TRECEIVE);
2154                 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2155
2156                 /* Word 12 */
2157                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2158
2159                 /* Setup 1 TXRDY and 1 SKIP SGE */
2160                 txrdy[0] = 0;
2161                 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2162                 txrdy[2] = 0;
2163
2164                 sgl->addr_hi = putPaddrHigh(physaddr);
2165                 sgl->addr_lo = putPaddrLow(physaddr);
2166                 sgl->word2 = 0;
2167                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2168                 sgl->word2 = cpu_to_le32(sgl->word2);
2169                 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2170                 sgl++;
2171                 sgl->addr_hi = 0;
2172                 sgl->addr_lo = 0;
2173                 sgl->word2 = 0;
2174                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2175                 sgl->word2 = cpu_to_le32(sgl->word2);
2176                 sgl->sge_len = 0;
2177                 sgl++;
2178                 atomic_inc(&tgtp->xmt_fcp_write);
2179                 break;
2180
2181         case NVMET_FCOP_RSP:
2182                 /* Words 0 - 2 */
2183                 physaddr = rsp->rspdma;
2184                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2185                 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2186                 wqe->fcp_trsp.bde.addrLow =
2187                         cpu_to_le32(putPaddrLow(physaddr));
2188                 wqe->fcp_trsp.bde.addrHigh =
2189                         cpu_to_le32(putPaddrHigh(physaddr));
2190
2191                 /* Word 3 */
2192                 wqe->fcp_trsp.response_len = rsp->rsplen;
2193
2194                 /* Word 4 */
2195                 wqe->fcp_trsp.rsvd_4_5[0] = 0;
2196
2197
2198                 /* Word 5 */
2199
2200                 /* Word 6 */
2201                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2202                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2203                 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2204                        nvmewqe->sli4_xritag);
2205
2206                 /* Word 7 */
2207                 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
2208                 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
2209                 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
2210
2211                 /* Word 8 */
2212                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2213
2214                 /* Word 9 */
2215                 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2216                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2217
2218                 /* Word 10 */
2219                 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
2220                 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
2221                 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
2222                 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
2223                        LPFC_WQE_LENLOC_WORD3);
2224                 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
2225                 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
2226                 if (phba->cfg_nvme_oas)
2227                         bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
2228
2229                 /* Word 11 */
2230                 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
2231                        LPFC_WQE_CQ_ID_DEFAULT);
2232                 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
2233                        FCP_COMMAND_TRSP);
2234                 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2235
2236                 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2237                         /* Good response - all zeros on wire */
2238                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
2239                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
2240                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
2241                 } else {
2242                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2243                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2244                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2245                                ((rsp->rsplen >> 2) - 1));
2246                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2247                 }
2248
2249                 /* Use rspbuf, NOT sg list */
2250                 rsp->sg_cnt = 0;
2251                 sgl->word2 = 0;
2252                 atomic_inc(&tgtp->xmt_fcp_rsp);
2253                 break;
2254
2255         default:
2256                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2257                                 "6064 Unknown Rsp Op %d\n",
2258                                 rsp->op);
2259                 return NULL;
2260         }
2261
2262         nvmewqe->retry = 1;
2263         nvmewqe->vport = phba->pport;
2264         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2265         nvmewqe->context1 = ndlp;
2266
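        /*
         * Map the transport's scatter/gather entries into data SGEs,
         * advancing the relative offset as each entry is consumed.
         */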
2267         for (i = 0; i < rsp->sg_cnt; i++) {
2268                 sgel = &rsp->sg[i];
2269                 physaddr = sg_dma_address(sgel);
2270                 cnt = sg_dma_len(sgel);
2271                 sgl->addr_hi = putPaddrHigh(physaddr);
2272                 sgl->addr_lo = putPaddrLow(physaddr);
2273                 sgl->word2 = 0;
2274                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2275                 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2276                 if ((i+1) == rsp->sg_cnt)
2277                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2278                 sgl->word2 = cpu_to_le32(sgl->word2);
2279                 sgl->sge_len = cpu_to_le32(cnt);
2280                 sgl++;
2281                 ctxp->offset += cnt;
2282         }
2283         ctxp->state = LPFC_NVMET_STE_DATA;
2284         ctxp->entry_cnt++;
2285         return nvmewqe;
2286 }
2287
2288 /**
2289  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2290  * @phba: Pointer to HBA context object.
2291  * @cmdwqe: Pointer to driver command WQE object.
2292  * @wcqe: Pointer to driver response CQE object.
2293  *
2294  * The function is called from the SLI ring event handler with no
2295  * lock held. It is the completion handler for an NVME ABTS issued for an
2296  * FCP command and frees the memory resources used for the NVME command.
2297  **/
2298 static void
2299 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2300                              struct lpfc_wcqe_complete *wcqe)
2301 {
2302         struct lpfc_nvmet_rcv_ctx *ctxp;
2303         struct lpfc_nvmet_tgtport *tgtp;
2304         uint32_t status, result;
2305         unsigned long flags;
2306         bool released = false;
2307
2308         ctxp = cmdwqe->context2;
2309         status = bf_get(lpfc_wcqe_c_status, wcqe);
2310         result = wcqe->parameter;
2311
2312         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2313         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2314                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2315
2316         ctxp->state = LPFC_NVMET_STE_DONE;
2317
2318         /* Check if we already received a free context call
2319          * and we have completed processing an abort situation.
2320          */
2321         spin_lock_irqsave(&ctxp->ctxlock, flags);
2322         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2323             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2324                 list_del(&ctxp->list);
2325                 released = true;
2326         }
2327         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2328         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2329         atomic_inc(&tgtp->xmt_abort_rsp);
2330
2331         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2332                         "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2333                         "WCQE: %08x %08x %08x %08x\n",
2334                         ctxp->oxid, ctxp->flag, released,
2335                         wcqe->word0, wcqe->total_data_placed,
2336                         result, wcqe->word3);
2337
2338         cmdwqe->context2 = NULL;
2339         cmdwqe->context3 = NULL;
2340         /*
2341          * If the transport has released the ctx, we can reuse it now.
2342          * Otherwise it will be recycled by the transport release call.
2343          */
2344         if (released)
2345                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2346
2347         /* This is the iocbq for the abort, not the command */
2348         lpfc_sli_release_iocbq(phba, cmdwqe);
2349
2350         /* Since iaab/iaar are NOT set, there is no work left.
2351          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2352          * should have been called already.
2353          */
2354 }
2355
2356 /**
2357  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2358  * @phba: Pointer to HBA context object.
2359  * @cmdwqe: Pointer to driver command WQE object.
2360  * @wcqe: Pointer to driver response CQE object.
2361  *
2362  * The function is called from the SLI ring event handler with no
2363  * lock held. It is the completion handler for an NVME ABTS issued for an
2364  * FCP command and frees the memory resources used for the NVME command.
2365  **/
2366 static void
2367 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2368                                struct lpfc_wcqe_complete *wcqe)
2369 {
2370         struct lpfc_nvmet_rcv_ctx *ctxp;
2371         struct lpfc_nvmet_tgtport *tgtp;
2372         unsigned long flags;
2373         uint32_t status, result;
2374         bool released = false;
2375
2376         ctxp = cmdwqe->context2;
2377         status = bf_get(lpfc_wcqe_c_status, wcqe);
2378         result = wcqe->parameter;
2379
2380         if (!ctxp) {
2381                 /* If the context is clear, the related IO already completed */
2382                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2383                                 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2384                                 wcqe->word0, wcqe->total_data_placed,
2385                                 result, wcqe->word3);
2386                 return;
2387         }
2388
2389         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2390         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2391                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2392
2393         /* Sanity check */
2394         if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2395                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2396                                 "6112 ABTS Wrong state:%d oxid x%x\n",
2397                                 ctxp->state, ctxp->oxid);
2398         }
2399
2400         /* Check if we already received a free context call
2401          * and we have completed processing an abort situation.
2402          */
2403         ctxp->state = LPFC_NVMET_STE_DONE;
2404         spin_lock_irqsave(&ctxp->ctxlock, flags);
2405         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2406             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2407                 list_del(&ctxp->list);
2408                 released = true;
2409         }
2410         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2411         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2412         atomic_inc(&tgtp->xmt_abort_rsp);
2413
2414         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2415                         "6316 ABTS cmpl xri x%x flg x%x (%x) "
2416                         "WCQE: %08x %08x %08x %08x\n",
2417                         ctxp->oxid, ctxp->flag, released,
2418                         wcqe->word0, wcqe->total_data_placed,
2419                         result, wcqe->word3);
2420
2421         cmdwqe->context2 = NULL;
2422         cmdwqe->context3 = NULL;
2423         /*
2424          * If the transport has released the ctx, we can reuse it now.
2425          * Otherwise it will be recycled by the transport release call.
2426          */
2427         if (released)
2428                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2429
2430         /* Since iaab/iaar are NOT set, there is no work left.
2431          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2432          * should have been called already.
2433          */
2434 }
2435
2436 /**
2437  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2438  * @phba: Pointer to HBA context object.
2439  * @cmdwqe: Pointer to driver command WQE object.
2440  * @wcqe: Pointer to driver response CQE object.
2441  *
2442  * The function is called from the SLI ring event handler with no
2443  * lock held. It is the completion handler for an NVME ABTS issued for an
2444  * LS command and frees the memory resources used for the NVME command.
2445  **/
2446 static void
2447 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2448                             struct lpfc_wcqe_complete *wcqe)
2449 {
2450         struct lpfc_nvmet_rcv_ctx *ctxp;
2451         struct lpfc_nvmet_tgtport *tgtp;
2452         uint32_t status, result;
2453
2454         ctxp = cmdwqe->context2;
2455         status = bf_get(lpfc_wcqe_c_status, wcqe);
2456         result = wcqe->parameter;
2457
2458         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2459         atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2460
2461         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2462                         "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2463                         ctxp, wcqe->word0, wcqe->total_data_placed,
2464                         result, wcqe->word3);
2465
2466         if (!ctxp) {
2467                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2468                                 "6415 NVMET LS Abort No ctx: WCQE: "
2469                                  "%08x %08x %08x %08x\n",
2470                                 wcqe->word0, wcqe->total_data_placed,
2471                                 result, wcqe->word3);
2472
2473                 lpfc_sli_release_iocbq(phba, cmdwqe);
2474                 return;
2475         }
2476
2477         if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2478                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2479                                 "6416 NVMET LS abort cmpl state mismatch: "
2480                                 "oxid x%x: %d %d\n",
2481                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2482         }
2483
2484         cmdwqe->context2 = NULL;
2485         cmdwqe->context3 = NULL;
2486         lpfc_sli_release_iocbq(phba, cmdwqe);
2487         kfree(ctxp);
2488 }
2489
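/**
 * lpfc_nvmet_unsol_issue_abort - Prepare an ABTS for an unsolicited exchange
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context associated with the exchange being aborted.
 * @sid: source ID of the remote port that owns the exchange.
 * @xri: exchange ID to place in the OX_ID field of the ABTS.
 *
 * Build an XMIT_SEQUENCE64 WQE carrying a BA_ABTS (BLS) sequence in
 * ctxp->wqeq; the caller is responsible for submitting it. Returns 1 when
 * the WQE has been prepared, or 0 when the remote node is not in a usable
 * state and the ABTS is dropped.
 **/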
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
                             struct lpfc_nvmet_rcv_ctx *ctxp,
                             uint32_t sid, uint16_t xri)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
        union lpfc_wqe *wqe_abts;
        struct lpfc_nodelist *ndlp;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6067 ABTS: sid %x xri x%x/x%x\n",
                        sid, xri, ctxp->wqeq->sli4_xritag);

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        ndlp = lpfc_findnode_did(phba->pport, sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                                "6134 Drop ABTS - wrong NDLP state x%x.\n",
                                (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

                /* No failure to an ABTS request. */
                return 0;
        }

        abts_wqeq = ctxp->wqeq;
        wqe_abts = &abts_wqeq->wqe;

        /*
         * Since we zero the whole WQE, we need to ensure we set the WQE fields
         * that were initialized in lpfc_sli4_nvmet_alloc.
         */
        memset(wqe_abts, 0, sizeof(union lpfc_wqe));

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
        bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
        bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
               abts_wqeq->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
               CMD_XMIT_SEQUENCE64_WQE);
        bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
        bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

        /* Word 8 */
        wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
        /* Needs to be set by caller */
        bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

        /* Word 10 */
        bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
               LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
        bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
               LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
               OTHER_COMMAND);

        abts_wqeq->vport = phba->pport;
        abts_wqeq->context1 = ndlp;
        abts_wqeq->context2 = ctxp;
        abts_wqeq->context3 = NULL;
        abts_wqeq->rsvd2 = 0;
        /* hba_wqidx should already be setup from command we are aborting */
        abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
        abts_wqeq->iocb.ulpLe = 1;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6069 Issue ABTS to xri x%x reqtag x%x\n",
                        xri, abts_wqeq->iotag);
        return 1;
}

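/**
 * lpfc_nvmet_sol_fcp_issue_abort - Abort an outstanding solicited FCP exchange
 * @phba: Pointer to HBA context object.
 * @ctxp: Pointer to the receive context whose exchange is being aborted.
 * @sid: Source ID (S_ID) of the remote port.
 * @xri: Exchange identifier associated with the command.
 *
 * Gets an iocbq, builds an ABORT_XRI_CX WQE that references the XRI of the
 * outstanding command, and posts it to the same WQ as the command being
 * aborted.  The abort is dropped (and 0 is returned) if the remote node is
 * unusable, no iocbq is available, the HBA is flushing NVME I/O, or an
 * abort is already pending on this exchange.
 *
 * Returns 0 if the abort was issued or deliberately dropped, 1 if
 * lpfc_sli4_issue_wqe() failed.
 **/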
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
                               struct lpfc_nvmet_rcv_ctx *ctxp,
                               uint32_t sid, uint16_t xri)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
        union lpfc_wqe *abts_wqe;
        struct lpfc_nodelist *ndlp;
        unsigned long flags;
        int rc;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
                ctxp->wqeq = ctxp->ctxbuf->iocbq;
                ctxp->wqeq->hba_wqidx = 0;
        }

        ndlp = lpfc_findnode_did(phba->pport, sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                                "6160 Drop ABORT - wrong NDLP state x%x.\n",
                                (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

                /* No failure to an ABTS request. */
                ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
                return 0;
        }

        /* Issue ABTS for this WQE based on iotag */
        ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
        if (!ctxp->abort_wqeq) {
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                                "6161 ABORT failed: No wqeqs: "
                                "xri: x%x\n", ctxp->oxid);
                /* No failure to an ABTS request. */
                ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
                return 0;
        }
        abts_wqeq = ctxp->abort_wqeq;
        abts_wqe = &abts_wqeq->wqe;
        ctxp->state = LPFC_NVMET_STE_ABORT;

        /* Announce entry to new IO submit field. */
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6162 ABORT Request to rport DID x%06x "
                        "for xri x%x x%x\n",
                        ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

        /* If the hba is getting reset, this flag is set.  It is
         * cleared when the reset is complete and rings reestablished.
         */
        spin_lock_irqsave(&phba->hbalock, flags);
        /* driver queued commands are in process of being flushed */
        if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                "6163 Driver in reset cleanup - flushing "
                                "NVME Req now. hba_flag x%x oxid x%x\n",
                                phba->hba_flag, ctxp->oxid);
                lpfc_sli_release_iocbq(phba, abts_wqeq);
                ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
                return 0;
        }

        /* Outstanding abort is in progress */
        if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                "6164 Outstanding NVME I/O Abort Request "
                                "still pending on oxid x%x\n",
                                ctxp->oxid);
                lpfc_sli_release_iocbq(phba, abts_wqeq);
                ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
                return 0;
        }

        /* Ready - mark outstanding as aborted by driver. */
        abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

        /* WQEs are reused.  Clear stale data and set key fields to
         * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
         */
        memset(abts_wqe, 0, sizeof(union lpfc_wqe));

        /* word 3 */
        bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

        /* word 7 */
        bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
        bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

        /* word 8 - tell the FW to abort the IO associated with this
         * outstanding exchange ID.
         */
        abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

        /* word 9 - this is the iotag for the abts_wqe completion. */
        bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
               abts_wqeq->iotag);

        /* word 10 */
        bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
        bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

        /* word 11 */
        bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
        bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
        bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
        abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
        abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
        abts_wqeq->iocb_cmpl = NULL;
        abts_wqeq->iocb_flag |= LPFC_IO_NVME;
        abts_wqeq->context2 = ctxp;
        abts_wqeq->vport = phba->pport;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS) {
                atomic_inc(&tgtp->xmt_abort_sol);
                return 0;
        }

        atomic_inc(&tgtp->xmt_abort_rsp_error);
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        lpfc_sli_release_iocbq(phba, abts_wqeq);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                        "6166 Failed ABORT issue_wqe with status x%x "
                        "for oxid x%x.\n",
                        rc, ctxp->oxid);
        return 1;
}


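/**
 * lpfc_nvmet_unsol_fcp_issue_abort - Issue an ABTS for an unsolicited FCP exchange
 * @phba: Pointer to HBA context object.
 * @ctxp: Pointer to the receive context being aborted.
 * @sid: Source ID (S_ID) of the remote port.
 * @xri: Exchange identifier of the exchange to abort.
 *
 * Builds the ABTS with lpfc_nvmet_unsol_issue_abort() and posts it on the
 * FCP ring with lpfc_nvmet_unsol_fcp_abort_cmp() as the completion handler.
 *
 * Returns 0 on success, 1 if the abort could not be issued.
 **/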
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
                                 struct lpfc_nvmet_rcv_ctx *ctxp,
                                 uint32_t sid, uint16_t xri)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
        unsigned long flags;
        int rc;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
                ctxp->wqeq = ctxp->ctxbuf->iocbq;
                ctxp->wqeq->hba_wqidx = 0;
        }

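        /* Don't issue an ABTS for a context that has already been freed. */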
        if (ctxp->state == LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);
                rc = WQE_BUSY;
                goto aerr;
        }
        ctxp->state = LPFC_NVMET_STE_ABORT;
        ctxp->entry_cnt++;
        rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
        if (rc == 0)
                goto aerr;

        spin_lock_irqsave(&phba->hbalock, flags);
        abts_wqeq = ctxp->wqeq;
        abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
        abts_wqeq->iocb_cmpl = NULL;
        abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS)
                return 0;

aerr:
        ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
        atomic_inc(&tgtp->xmt_abort_rsp_error);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                        "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
                        ctxp->oxid, rc);
        return 1;
}

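/**
 * lpfc_nvmet_unsol_ls_issue_abort - Issue an ABTS for an unsolicited LS exchange
 * @phba: Pointer to HBA context object.
 * @ctxp: Pointer to the receive context being aborted.
 * @sid: Source ID (S_ID) of the remote port.
 * @xri: Exchange identifier of the LS exchange to abort.
 *
 * Builds the ABTS with lpfc_nvmet_unsol_issue_abort() and posts it on the
 * ELS ring with lpfc_nvmet_xmt_ls_abort_cmp() as the completion handler.
 * On failure the iocbq and the receive context are freed here.
 *
 * Always returns 0.
 **/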
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
                                struct lpfc_nvmet_rcv_ctx *ctxp,
                                uint32_t sid, uint16_t xri)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
        union lpfc_wqe *wqe_abts;
        unsigned long flags;
        int rc;

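        /*
         * An LS abort is expected either right after the LS request was
         * received (entry_cnt 1) or after the LS response was queued
         * (entry_cnt 2).  Log any other state, then proceed with the abort.
         */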
        if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
            (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
                ctxp->state = LPFC_NVMET_STE_LS_ABORT;
                ctxp->entry_cnt++;
        } else {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6418 NVMET LS abort state mismatch "
                                "IO x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
                ctxp->state = LPFC_NVMET_STE_LS_ABORT;
        }

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
                /* Issue ABTS for this WQE based on iotag */
                ctxp->wqeq = lpfc_sli_get_iocbq(phba);
                if (!ctxp->wqeq) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                                        "6068 Abort failed: No wqeqs: "
                                        "xri: x%x\n", xri);
                        /* No failure to an ABTS request. */
                        kfree(ctxp);
                        return 0;
                }
        }
        abts_wqeq = ctxp->wqeq;
        wqe_abts = &abts_wqeq->wqe;

        if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
                rc = WQE_BUSY;
                goto out;
        }

        spin_lock_irqsave(&phba->hbalock, flags);
        abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
        abts_wqeq->iocb_cmpl = NULL;
        abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS) {
                atomic_inc(&tgtp->xmt_abort_unsol);
                return 0;
        }
out:
        atomic_inc(&tgtp->xmt_abort_rsp_error);
        abts_wqeq->context2 = NULL;
        abts_wqeq->context3 = NULL;
        lpfc_sli_release_iocbq(phba, abts_wqeq);
        kfree(ctxp);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                        "6056 Failed to Issue ABTS. Status x%x\n", rc);
        return 0;
}