/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
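
/* WQE preparation and abort helpers defined later in this file. */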
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);
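
/* Queue a receive context for deferred release: the abort/XBUSY
 * completion path, not the caller, performs the final context
 * recycle (see lpfc_nvmet_xmt_fcp_release()).
 */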
void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
		return;
	}
	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (status)
		atomic_inc(&tgtp->xmt_ls_rsp_error);
	else
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	rsp->done(rsp);
	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with.
 * @ctx_buf: context buffer to clean up and repost.
 *
 * Description: Cleans up the given context, then either hands it to a
 * command waiting on the nvmet_io_wait_list or returns it to the per-CPU
 * MRQ context list so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;
	int cpu;
	unsigned long iflag;

	if (ctxp->txrdy) {
		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
			      ctxp->txrdy_phys);
		ctxp->txrdy = NULL;
		ctxp->txrdy_phys = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		payload = (uint32_t *)(nvmebuf->dbuf.virt);
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->txrdy = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			ctxp->ts_cmd_nvme = ktime_get_ns();
			ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);
		/*
		 * The calling sequence should be:
		 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
		 * in the NVME command / FC header is stored.
		 * A buffer has already been reposted for this IO, so just free
		 * the nvmebuf.
		 */
		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
					  payload, size);

		/* Process FCP command */
		if (rc == 0) {
			atomic_inc(&tgtp->rcv_fcp_cmd_out);
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			return;
		}

		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
				ctxp->oxid, rc,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	cpu = smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
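
/* lpfc_nvmet_ktime - fold one IO's timestamps into the driver-wide
 * latency statistics. The guards at the top discard samples with
 * missing or out-of-order timestamps so every segment is non-negative.
 */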
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;

	if (!phba->ktime_on)
		return;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
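	/*
	 * Note the segments telescope: each one reduces to the difference
	 * of two adjacent timestamps, e.g.
	 *   seg2 = (ts_nvme_data - ts_isr_cmd) - seg1
	 *        =  ts_nvme_data - ts_cmd_nvme
	 */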
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
	seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
		seg1 - seg2;
	seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
		seg1 - seg2 - seg3;
	seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
		seg1 - seg2 - seg3 - seg4;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = (ctxp->ts_nvme_status -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 - seg5;
		seg7 = (ctxp->ts_status_wqput -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 -
			seg4 - seg5 - seg6;
		seg8 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 -
			seg5 - seg6 - seg7;
		seg9 = (ctxp->ts_status_nvme -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 -
			seg5 - seg6 - seg7 - seg8;
		seg10 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd);
	} else {
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}
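
	/* Fold this IO's segments into the driver-wide running
	 * total/min/max counters.
	 */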
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;

	/* Auto rsp commands are done; seg6 thru seg10 are 0 */
	if (!seg6)
		return;
	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;

	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_error);

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
					ctxp->oxid, status, result);
		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}
	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6703 CPU Check cmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_cmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_ccmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
	}
}

static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
	    (ctxp->entry_cnt != 1)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6412 NVMET LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_LS_RSP;
	ctxp->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		ctxp->rqb_buffer = NULL;
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_iocbq *nvmewqeq;
	int rc;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = smp_processor_id();

		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[id]++;
		if (rsp->hwqid != id) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
			ctxp->cpu = rsp->hwqid;
		}
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO xri x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!phba->ktime_on)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->tport_unreg_done);
}
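
/* NVME transport abort callback: flag the exchange and kick the
 * appropriate ABTS path, depending on whether a WQE has been issued
 * on the exchange yet.
 */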
static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & LPFC_NVMET_XBUSY) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;

	/* A state of LPFC_NVMET_STE_RCV means we have just received
	 * the NVME command and have not started processing it
	 * (by issuing any IO WQEs on this exchange yet).
	 */
	if (ctxp->state == LPFC_NVMET_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}
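
/* NVME transport release callback: recycle the context immediately
 * unless an abort or XBUSY completion is still pending, in which case
 * the release is deferred to the abort path.
 */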
static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	if (ctxp->state != LPFC_NVMET_STE_DONE &&
	    ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
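
/* NVME transport defer_rcv callback: invoked once a command that was
 * deferred with -EOVERFLOW can be processed; the receive buffer held
 * since lpfc_nvmet_unsol_fcp_buffer() is reposted to its RQ here.
 */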
static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, smp_processor_id());

	tgtp = phba->targetport->private;
	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
	lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
}
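
/* Template handed to nvmet_fc_register_targetport(); the queue count,
 * segment limits and feature flags are finalized at registration time
 * in lpfc_nvmet_create_targetport().
 */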
static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv	= lpfc_nvmet_defer_rcv,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
			infop++;
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
	 *
	 * Each line represents an MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
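	/*
	 * Example: a context seeded on cpu2/mrq2 whose IO completes on
	 * CPU 5 is recycled onto cpu5/mrq2.
	 */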
	infop = phba->sli4_hba.nvmet_ctx_info;
	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
			spin_lock_init(&infop->nvmet_ctx_list_lock);
			infop->nvmet_ctx_list_cnt = 0;
			infop++;
		}
	}

	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */
	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
		last_infop = lpfc_get_ctx_list(phba, 0, j);
		for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) {
			infop = lpfc_get_ctx_list(phba, i, j);
			infop->nvmet_ctx_next_cpu = last_infop;
			last_infop = infop;
		}
	}
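
	/*
	 * The descending loop above links each MRQ silo into a ring
	 * (cpu0 -> cpu1 -> ... -> cpuX -> cpu0), so a replenish can
	 * cycle through every CPU's list for that MRQ.
	 */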

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	idx = 0;
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVMET_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* Word 7 */
		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
		bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}

		/*
		 * Add ctx to MRQidx context list. Our initial assumption
		 * is MRQidx will be associated with CPUidx. This association
		 * can change on the fly.
		 */
		infop = lpfc_get_ctx_list(phba, idx, idx);
		spin_lock(&infop->nvmet_ctx_list_lock);
		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
		infop->nvmet_ctx_list_cnt++;
		spin_unlock(&infop->nvmet_ctx_list_lock);

		/* Spread ctx structures evenly across all MRQs */
		idx++;
		if (idx >= phba->cfg_nvmet_mrq)
			idx = 0;
	}

	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
			infop = lpfc_get_ctx_list(phba, i, j);
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
					"6408 TOTAL NVMET ctx for CPU %d "
					"MRQ %d: cnt %d nextcpu %p\n",
					i, j, infop->nvmet_ctx_list_cnt,
					infop->nvmet_ctx_next_cpu);
		}
	}
	return 0;
}

int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* Limit to LPFC_MAX_NVME_SEG_CNT.
	 * For now need + 1 to get around NVME transport logic.
	 */
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
				"6400 Reducing sg segment cnt to %d\n",
				LPFC_MAX_NVME_SEG_CNT);
		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
	} else {
		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
					   NVMET_FCTGTFEAT_CMD_IN_ISR |
					   NVMET_FCTGTFEAT_OPDONE_IN_ISR;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport x%x: "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				error,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);
		phba->targetport = NULL;
		phba->nvmet_support = 0;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: %p, private %p "
				"portnm %llx nodenm %llx segs %d qs %d\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name,
				lpfc_tgttemplate.max_sgl_segments,
				lpfc_tgttemplate.max_hw_queues);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port %p did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
				ctxp->ctxbuf->sglq->sli4_lxritag,
				rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid %x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint16_t xri;
	unsigned long iflag = 0;

	xri = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			xri, smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			 xri, smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}

void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		init_completion(&tgtp->tport_unreg_done);
		nvmet_fc_unregister_targetport(phba->targetport);
		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}

/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		if (nvmebuf)
			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->wqeq = NULL;
	ctxp->state = LPFC_NVMET_STE_LS_RCV;
	ctxp->entry_cnt = 1;
	ctxp->rqb_buffer = (void *)nvmebuf;

	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
	if (nvmebuf)
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}

static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ a NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU's.
	 *
	 * First we need to pick a context list to start looking at.
	 * nvmet_ctx_start_cpu had available contexts the last time
	 * we needed to replenish this CPU, where nvmet_ctx_next_cpu
	 * is just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Otherwise, move on to the next CPU for this MRQ */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}
#endif
	/* Nothing found, all contexts for the MRQ are in-flight */
	return NULL;
}
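
/* Note: lpfc_nvmet_replenish_context() above visits at most every CPU
 * silo for the MRQ once per call; the start hint (nvmet_ctx_start_cpu)
 * is updated on success so the next replenish begins where contexts
 * were last found.
 */
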
/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc, qno;
	unsigned long iflag;
	int current_cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 NVMET FCP Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty, thus it would need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
		id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_rcv_io[id]++;
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, smp_processor_id());

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
	if (ctxp->state != LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->txrdy = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		ctxp->ts_isr_cmd = isr_timestamp;
		ctxp->ts_cmd_nvme = ktime_get_ns();
		ctxp->ts_nvme_data = 0;
		ctxp->ts_data_wqput = 0;
		ctxp->ts_isr_data = 0;
		ctxp->ts_data_nvme = 0;
		ctxp->ts_nvme_status = 0;
		ctxp->ts_status_wqput = 0;
		ctxp->ts_isr_status = 0;
		ctxp->ts_status_nvme = 0;
	}
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
	 * the NVME command / FC header is stored, so we are free to repost
	 * the buffer.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, size);

	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba,
				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		/* defer reposting rcv buffer till .defer_rcv callback */
		ctxp->rqb_buffer = nvmebuf;
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		return;
	}

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
dropit:
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	if (oxid) {
		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	if (ctx_buf)
		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);

	if (nvmebuf)
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
}

/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}

/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp)
{
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
				    isr_timestamp);
}

/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare an lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine is used for allocating an lpfc-WQE data structure from
 * the driver lpfc-WQE free-list and preparing the WQE with the parameters
 * passed into the routine for the discovery state machine to issue Extended
 * Link Service (NVME) commands. It is a generic lpfc-WQE allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the NVME command-specific fields will be set up later by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic WQE data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the WQE data structure for this WQE to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return codes:
 *	Pointer to the newly allocated/prepared nvme wqe data structure
 *	NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 3 */

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
1912 static struct lpfc_iocbq *
1913 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1914 struct lpfc_nvmet_rcv_ctx *ctxp)
1916 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
1917 struct lpfc_nvmet_tgtport *tgtp;
1918 struct sli4_sge *sgl;
1919 struct lpfc_nodelist *ndlp;
1920 struct lpfc_iocbq *nvmewqe;
1921 struct scatterlist *sgel;
1922 union lpfc_wqe128 *wqe;
1924 dma_addr_t physaddr;
1928 if (!lpfc_is_link_up(phba)) {
1929 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1930 "6107 NVMET prep FCP wqe: link err:"
1931 "NPORT x%x oxid x%x ste %d\n",
1932 ctxp->sid, ctxp->oxid, ctxp->state);
1936 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1937 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1938 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1939 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1940 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1941 "6108 NVMET prep FCP wqe: no ndlp: "
1942 "NPORT x%x oxid x%x ste %d\n",
1943 ctxp->sid, ctxp->oxid, ctxp->state);
1947 if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
1948 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1949 "6109 NVMET prep FCP wqe: seg cnt err: "
1950 "NPORT x%x oxid x%x ste %d cnt %d\n",
1951 ctxp->sid, ctxp->oxid, ctxp->state,
1952 phba->cfg_nvme_seg_cnt);
1956 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1957 nvmewqe = ctxp->wqeq;
1958 if (nvmewqe == NULL) {
1959 /* Allocate buffer for command wqe */
1960 nvmewqe = ctxp->ctxbuf->iocbq;
1961 if (nvmewqe == NULL) {
1962 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1963 "6110 NVMET prep FCP wqe: No "
1964 "WQE: NPORT x%x oxid x%x ste %d\n",
1965 ctxp->sid, ctxp->oxid, ctxp->state);
1968 ctxp->wqeq = nvmewqe;
1969 xc = 0; /* create new XRI */
1970 nvmewqe->sli4_lxritag = NO_XRI;
1971 nvmewqe->sli4_xritag = NO_XRI;
1972 }
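/*
 * Note: xc was initialized to 1 above and is cleared only when the
 * context has no iocbq/XRI yet ("create new XRI"); it is later written
 * into the wqe_xc field of the TSEND/TRECEIVE/TRSP WQE, presumably so
 * the HBA can tell whether the exchange context already exists.
 */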
1975 if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
1976 (ctxp->entry_cnt == 1)) ||
1977 (ctxp->state == LPFC_NVMET_STE_DATA)) {
1978 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1979 } else {
1980 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1981 "6111 Wrong state NVMET FCP: %d cnt %d\n",
1982 ctxp->state, ctxp->entry_cnt);
1983 return NULL;
1984 }
1986 sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
1987 switch (rsp->op) {
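/*
 * Each NVMET FC op maps to a different SLI-4 work queue entry:
 * READDATA/READDATA_RSP build an FCP_TSEND64 WQE, WRITEDATA builds an
 * FCP_TRECEIVE64 WQE (preceded by a Transfer Ready payload), and RSP
 * builds an FCP_TRSP64 WQE carrying only the FCP response.
 */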
1988 case NVMET_FCOP_READDATA:
1989 case NVMET_FCOP_READDATA_RSP:
1990 /* Words 0 - 2 : The first sg segment */
1991 sgel = &rsp->sg[0];
1992 physaddr = sg_dma_address(sgel);
1993 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1994 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
1995 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
1996 wqe->fcp_tsend.bde.addrHigh =
1997 cpu_to_le32(putPaddrHigh(physaddr));
2000 wqe->fcp_tsend.payload_offset_len = 0;
2003 wqe->fcp_tsend.relative_offset = ctxp->offset;
2008 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2009 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2010 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2011 nvmewqe->sli4_xritag);
2014 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1);
2015 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
2018 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2021 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2022 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2025 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
2026 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
2027 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
2028 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
2029 LPFC_WQE_LENLOC_WORD12);
2030 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
2031 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
2032 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
2033 if (phba->cfg_nvme_oas)
2034 bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
2037 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
2038 LPFC_WQE_CQ_ID_DEFAULT);
2039 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
2043 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2045 /* Setup 2 SKIP SGEs */
2046 sgl->addr_hi = 0;
2047 sgl->addr_lo = 0;
2048 sgl->word2 = 0;
2049 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2050 sgl->word2 = cpu_to_le32(sgl->word2);
2051 sgl->sge_len = 0;
2052 sgl++;
2053 sgl->addr_hi = 0;
2054 sgl->addr_lo = 0;
2055 sgl->word2 = 0;
2056 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2057 sgl->word2 = cpu_to_le32(sgl->word2);
2058 sgl->sge_len = 0;
2059 sgl++;
2060 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
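/*
 * For READDATA_RSP the HBA transmits the FCP response itself (wqe_ar).
 * If the initiator negotiated response suppression (NLP_SUPPRESS_RSP)
 * and this is the 12-byte good-status response, SUP is set and no
 * response payload is carried; otherwise the response is embedded in
 * WQE words 16 and up (wqe_wqes/wqe_irsp/wqe_irsplen).
 */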
2061 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2062 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
2063 if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
2064 (rsp->rsplen == 12)) {
2065 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
2066 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
2067 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
2068 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
2069 } else {
2070 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2071 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2072 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2073 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2074 ((rsp->rsplen >> 2) - 1));
2075 memcpy(&wqe->words[16], rsp->rspaddr,
2076 rsp->rsplen);
2077 }
2078 } else {
2079 atomic_inc(&tgtp->xmt_fcp_read);
2081 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2082 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
2083 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
2084 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2085 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
2086 }
2087 break;
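/*
 * For a write, the target must first send a Transfer Ready to the
 * initiator; the TXRDY buffer allocated below carries that payload and
 * is referenced by words 0 - 2 of the TRECEIVE WQE.
 */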
2089 case NVMET_FCOP_WRITEDATA:
2090 /* Words 0 - 2 : The first sg segment */
2091 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2092 GFP_KERNEL, &physaddr);
2093 if (!txrdy) {
2094 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2095 "6041 Bad txrdy buffer: oxid x%x\n",
2099 ctxp->txrdy = txrdy;
2100 ctxp->txrdy_phys = physaddr;
2101 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2102 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2103 wqe->fcp_treceive.bde.addrLow =
2104 cpu_to_le32(putPaddrLow(physaddr));
2105 wqe->fcp_treceive.bde.addrHigh =
2106 cpu_to_le32(putPaddrHigh(physaddr));
2109 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
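/*
 * The BDE and length above describe the Transfer Ready payload, not
 * the write data; the data SGEs are filled in by the common
 * scatterlist loop at the bottom of this function.
 */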
2112 wqe->fcp_treceive.relative_offset = ctxp->offset;
2117 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2118 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2119 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2120 nvmewqe->sli4_xritag);
2123 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1);
2124 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
2125 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
2126 CMD_FCP_TRECEIVE64_WQE);
2129 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2132 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2133 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2136 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
2137 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
2138 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
2139 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
2140 LPFC_WQE_LENLOC_WORD12);
2141 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
2142 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
2143 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
2144 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
2145 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
2146 if (phba->cfg_nvme_oas)
2147 bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
2150 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
2151 LPFC_WQE_CQ_ID_DEFAULT);
2152 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
2153 FCP_COMMAND_TRECEIVE);
2154 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2157 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2159 /* Setup 1 TXRDY and 1 SKIP SGE */
2160 txrdy[0] = 0;
2161 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2162 txrdy[2] = 0;
2164 sgl->addr_hi = putPaddrHigh(physaddr);
2165 sgl->addr_lo = putPaddrLow(physaddr);
2166 sgl->word2 = 0;
2167 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2168 sgl->word2 = cpu_to_le32(sgl->word2);
2169 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2170 sgl++;
2171 sgl->addr_hi = 0;
2172 sgl->addr_lo = 0;
2173 sgl->word2 = 0;
2174 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2175 sgl->word2 = cpu_to_le32(sgl->word2);
2176 sgl->sge_len = 0;
2177 sgl++;
2178 atomic_inc(&tgtp->xmt_fcp_write);
2179 break;
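/*
 * NVMET_FCOP_RSP sends only the FCP response: the BDE points at the
 * transport-supplied response buffer (rsp->rspdma), so no scatterlist
 * SGEs are needed (sg_cnt is forced to 0 below).
 */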
2181 case NVMET_FCOP_RSP:
2183 physaddr = rsp->rspdma;
2184 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2185 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2186 wqe->fcp_trsp.bde.addrLow =
2187 cpu_to_le32(putPaddrLow(physaddr));
2188 wqe->fcp_trsp.bde.addrHigh =
2189 cpu_to_le32(putPaddrHigh(physaddr));
2192 wqe->fcp_trsp.response_len = rsp->rsplen;
2195 wqe->fcp_trsp.rsvd_4_5[0] = 0;
2201 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2202 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2203 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2204 nvmewqe->sli4_xritag);
2207 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0);
2208 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
2209 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
2212 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2215 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2216 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2219 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
2220 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
2221 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
2222 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
2223 LPFC_WQE_LENLOC_WORD3);
2224 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
2225 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
2226 if (phba->cfg_nvme_oas)
2227 bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
2230 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
2231 LPFC_WQE_CQ_ID_DEFAULT);
2232 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
2233 FCP_COMMAND_TRSP);
2234 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
2236 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2237 /* Good response - all zeros on wire */
2238 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
2239 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
2240 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
2241 } else {
2242 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2243 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2244 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2245 ((rsp->rsplen >> 2) - 1));
2246 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2247 }
2249 /* Use rspbuf, NOT sg list */
2250 rsp->sg_cnt = 0;
2251 sgl->word2 = 0;
2252 atomic_inc(&tgtp->xmt_fcp_rsp);
2253 break;
2255 default:
2256 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2257 "6064 Unknown Rsp Op %d\n",
2258 rsp->op);
2259 return NULL;
2260 }
2262 nvmewqe->retry = 1;
2263 nvmewqe->vport = phba->pport;
2264 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2265 nvmewqe->context1 = ndlp;
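/*
 * Map each scatterlist entry to an SLI-4 data SGE, tracking the
 * running relative offset and marking the last SGE.
 */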
2267 for (i = 0; i < rsp->sg_cnt; i++) {
2268 sgel = &rsp->sg[i];
2269 physaddr = sg_dma_address(sgel);
2270 cnt = sg_dma_len(sgel);
2271 sgl->addr_hi = putPaddrHigh(physaddr);
2272 sgl->addr_lo = putPaddrLow(physaddr);
2273 sgl->word2 = 0;
2274 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2275 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2276 if ((i+1) == rsp->sg_cnt)
2277 bf_set(lpfc_sli4_sge_last, sgl, 1);
2278 sgl->word2 = cpu_to_le32(sgl->word2);
2279 sgl->sge_len = cpu_to_le32(cnt);
2280 sgl++;
2281 ctxp->offset += cnt;
2282 }
2283 ctxp->state = LPFC_NVMET_STE_DATA;
2284 ctxp->entry_cnt++;
2285 return nvmewqe;
2286 }
2288 /**
2289 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2290 * @phba: Pointer to HBA context object.
2291 * @cmdwqe: Pointer to driver command WQE object.
2292 * @wcqe: Pointer to driver response CQE object.
2294 * The function is called from SLI ring event handler with no
2295 * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
2296 * The function frees memory resources used for the NVME commands.
2297 */
2298 static void
2299 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2300 struct lpfc_wcqe_complete *wcqe)
2301 {
2302 struct lpfc_nvmet_rcv_ctx *ctxp;
2303 struct lpfc_nvmet_tgtport *tgtp;
2304 uint32_t status, result;
2305 unsigned long flags;
2306 bool released = false;
2308 ctxp = cmdwqe->context2;
2309 status = bf_get(lpfc_wcqe_c_status, wcqe);
2310 result = wcqe->parameter;
2312 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2313 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2314 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2316 ctxp->state = LPFC_NVMET_STE_DONE;
2318 /* Check if we already received a free context call
2319 * and we have completed processing an abort situation.
2320 */
2321 spin_lock_irqsave(&ctxp->ctxlock, flags);
2322 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2323 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2324 list_del(&ctxp->list);
2325 released = true;
2326 }
2327 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2328 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2329 atomic_inc(&tgtp->xmt_abort_rsp);
2331 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2332 "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2333 "WCQE: %08x %08x %08x %08x\n",
2334 ctxp->oxid, ctxp->flag, released,
2335 wcqe->word0, wcqe->total_data_placed,
2336 result, wcqe->word3);
2338 cmdwqe->context2 = NULL;
2339 cmdwqe->context3 = NULL;
2340 /*
2341 * if transport has released ctx, then can reuse it. Otherwise,
2342 * will be recycled by transport release call.
2343 */
2344 if (released)
2345 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2347 /* This is the iocbq for the abort, not the command */
2348 lpfc_sli_release_iocbq(phba, cmdwqe);
2350 /* Since iaab/iaar are NOT set, there is no work left.
2351 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2352 * should have been called already.
2353 */
2354 }
2356 /**
2357 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2358 * @phba: Pointer to HBA context object.
2359 * @cmdwqe: Pointer to driver command WQE object.
2360 * @wcqe: Pointer to driver response CQE object.
2362 * The function is called from SLI ring event handler with no
2363 * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
2364 * The function frees memory resources used for the NVME commands.
2365 */
2366 static void
2367 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2368 struct lpfc_wcqe_complete *wcqe)
2369 {
2370 struct lpfc_nvmet_rcv_ctx *ctxp;
2371 struct lpfc_nvmet_tgtport *tgtp;
2372 unsigned long flags;
2373 uint32_t status, result;
2374 bool released = false;
2376 ctxp = cmdwqe->context2;
2377 status = bf_get(lpfc_wcqe_c_status, wcqe);
2378 result = wcqe->parameter;
2380 if (!ctxp) {
2381 /* if context is clear, the related io has already completed */
2382 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2383 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2384 wcqe->word0, wcqe->total_data_placed,
2385 result, wcqe->word3);
2386 return;
2387 }
2389 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2390 if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2391 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2393 /* Sanity check */
2394 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2395 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2396 "6112 ABTS Wrong state:%d oxid x%x\n",
2397 ctxp->state, ctxp->oxid);
2398 }
2400 /* Check if we already received a free context call
2401 * and we have completed processing an abort situation.
2402 */
2403 ctxp->state = LPFC_NVMET_STE_DONE;
2404 spin_lock_irqsave(&ctxp->ctxlock, flags);
2405 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2406 !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2407 list_del(&ctxp->list);
2408 released = true;
2409 }
2410 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2411 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2412 atomic_inc(&tgtp->xmt_abort_rsp);
2414 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2415 "6316 ABTS cmpl xri x%x flg x%x (%x) "
2416 "WCQE: %08x %08x %08x %08x\n",
2417 ctxp->oxid, ctxp->flag, released,
2418 wcqe->word0, wcqe->total_data_placed,
2419 result, wcqe->word3);
2421 cmdwqe->context2 = NULL;
2422 cmdwqe->context3 = NULL;
2423 /*
2424 * if transport has released ctx, then can reuse it. Otherwise,
2425 * will be recycled by transport release call.
2426 */
2427 if (released)
2428 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2430 /* Since iaab/iaar are NOT set, there is no work left.
2431 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2432 * should have been called already.
2433 */
2434 }
2436 /**
2437 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2438 * @phba: Pointer to HBA context object.
2439 * @cmdwqe: Pointer to driver command WQE object.
2440 * @wcqe: Pointer to driver response CQE object.
2442 * The function is called from SLI ring event handler with no
2443 * lock held. This function is the completion handler for NVME ABTS for LS cmds.
2444 * The function frees memory resources used for the NVME commands.
2445 */
2446 static void
2447 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2448 struct lpfc_wcqe_complete *wcqe)
2449 {
2450 struct lpfc_nvmet_rcv_ctx *ctxp;
2451 struct lpfc_nvmet_tgtport *tgtp;
2452 uint32_t status, result;
2454 ctxp = cmdwqe->context2;
2455 status = bf_get(lpfc_wcqe_c_status, wcqe);
2456 result = wcqe->parameter;
2458 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2459 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2461 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2462 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2463 ctxp, wcqe->word0, wcqe->total_data_placed,
2464 result, wcqe->word3);
2466 if (!ctxp) {
2467 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2468 "6415 NVMET LS Abort No ctx: WCQE: "
2469 "%08x %08x %08x %08x\n",
2470 wcqe->word0, wcqe->total_data_placed,
2471 result, wcqe->word3);
2473 lpfc_sli_release_iocbq(phba, cmdwqe);
2474 return;
2475 }
2477 if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2478 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2479 "6416 NVMET LS abort cmpl state mismatch: "
2480 "oxid x%x: %d %d\n",
2481 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2482 }
2484 cmdwqe->context2 = NULL;
2485 cmdwqe->context3 = NULL;
2486 lpfc_sli_release_iocbq(phba, cmdwqe);
2487 kfree(ctxp);
2488 }
2490 static int
2491 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2492 struct lpfc_nvmet_rcv_ctx *ctxp,
2493 uint32_t sid, uint16_t xri)
2494 {
2495 struct lpfc_nvmet_tgtport *tgtp;
2496 struct lpfc_iocbq *abts_wqeq;
2497 union lpfc_wqe *wqe_abts;
2498 struct lpfc_nodelist *ndlp;
2500 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2501 "6067 ABTS: sid %x xri x%x/x%x\n",
2502 sid, xri, ctxp->wqeq->sli4_xritag);
2504 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2506 ndlp = lpfc_findnode_did(phba->pport, sid);
2507 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2508 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2509 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2510 atomic_inc(&tgtp->xmt_abort_rsp_error);
2511 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2512 "6134 Drop ABTS - wrong NDLP state x%x.\n",
2513 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2515 /* No failure to an ABTS request. */
2516 return 0;
2517 }
2519 abts_wqeq = ctxp->wqeq;
2520 wqe_abts = &abts_wqeq->wqe;
2522 /*
2523 * Since we zero the whole WQE, we need to ensure we set the WQE fields
2524 * that were initialized in lpfc_sli4_nvmet_alloc.
2525 */
2526 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2529 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2530 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2531 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2532 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2533 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2536 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2537 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2538 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2539 abts_wqeq->sli4_xritag);
2542 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2543 CMD_XMIT_SEQUENCE64_WQE);
2544 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2545 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2546 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2549 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2552 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2553 /* Needs to be set by caller */
2554 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2557 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2558 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2559 LPFC_WQE_LENLOC_WORD12);
2560 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2561 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2564 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2565 LPFC_WQE_CQ_ID_DEFAULT);
2566 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2567 OTHER_COMMAND);
2569 abts_wqeq->vport = phba->pport;
2570 abts_wqeq->context1 = ndlp;
2571 abts_wqeq->context2 = ctxp;
2572 abts_wqeq->context3 = NULL;
2573 abts_wqeq->rsvd2 = 0;
2574 /* hba_wqidx should already be setup from command we are aborting */
2575 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2576 abts_wqeq->iocb.ulpLe = 1;
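/*
 * Note that this unsolicited abort goes out as a BLS ABTS
 * (FC_RCTL_BA_ABTS / FC_TYPE_BLS) via an XMIT_SEQUENCE64 WQE on the
 * exchange named by the received OXID, rather than as an ABORT_XRI
 * WQE, presumably because there is no locally posted WQE to abort.
 */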
2578 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2579 "6069 Issue ABTS to xri x%x reqtag x%x\n",
2580 xri, abts_wqeq->iotag);
2581 return 1;
2582 }
2584 static int
2585 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2586 struct lpfc_nvmet_rcv_ctx *ctxp,
2587 uint32_t sid, uint16_t xri)
2588 {
2589 struct lpfc_nvmet_tgtport *tgtp;
2590 struct lpfc_iocbq *abts_wqeq;
2591 union lpfc_wqe *abts_wqe;
2592 struct lpfc_nodelist *ndlp;
2593 unsigned long flags;
2594 int rc;
2596 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2597 if (!ctxp->wqeq) {
2598 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2599 ctxp->wqeq->hba_wqidx = 0;
2600 }
2602 ndlp = lpfc_findnode_did(phba->pport, sid);
2603 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2604 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2605 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2606 atomic_inc(&tgtp->xmt_abort_rsp_error);
2607 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2608 "6160 Drop ABORT - wrong NDLP state x%x.\n",
2609 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2611 /* No failure to an ABTS request. */
2612 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2613 return 0;
2614 }
2616 /* Issue ABTS for this WQE based on iotag */
2617 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2618 if (!ctxp->abort_wqeq) {
2619 atomic_inc(&tgtp->xmt_abort_rsp_error);
2620 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2621 "6161 ABORT failed: No wqeqs: "
2622 "xri: x%x\n", ctxp->oxid);
2623 /* No failure to an ABTS request. */
2624 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2625 return 0;
2626 }
2627 abts_wqeq = ctxp->abort_wqeq;
2628 abts_wqe = &abts_wqeq->wqe;
2629 ctxp->state = LPFC_NVMET_STE_ABORT;
2631 /* Announce entry to new IO submit field. */
2632 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2633 "6162 ABORT Request to rport DID x%06x "
2634 "for xri x%x x%x\n",
2635 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2637 /* If the hba is getting reset, this flag is set. It is
2638 * cleared when the reset is complete and rings reestablished.
2639 */
2640 spin_lock_irqsave(&phba->hbalock, flags);
2641 /* driver queued commands are in process of being flushed */
2642 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2643 spin_unlock_irqrestore(&phba->hbalock, flags);
2644 atomic_inc(&tgtp->xmt_abort_rsp_error);
2645 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2646 "6163 Driver in reset cleanup - flushing "
2647 "NVME Req now. hba_flag x%x oxid x%x\n",
2648 phba->hba_flag, ctxp->oxid);
2649 lpfc_sli_release_iocbq(phba, abts_wqeq);
2650 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2651 return 0;
2652 }
2654 /* Outstanding abort is in progress */
2655 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
2656 spin_unlock_irqrestore(&phba->hbalock, flags);
2657 atomic_inc(&tgtp->xmt_abort_rsp_error);
2658 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2659 "6164 Outstanding NVME I/O Abort Request "
2660 "still pending on oxid x%x\n",
2662 lpfc_sli_release_iocbq(phba, abts_wqeq);
2663 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2664 return 0;
2665 }
2667 /* Ready - mark outstanding as aborted by driver. */
2668 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
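/*
 * Everything from the HBA_NVME_IOQ_FLUSH check through issuing the
 * abort WQE runs under hbalock, so a reset flush or a duplicate abort
 * cannot race with marking this I/O LPFC_DRIVER_ABORTED.
 */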
2670 /* WQEs are reused. Clear stale data and set key fields to
2671 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
2672 */
2673 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
2676 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
2679 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
2680 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
2682 /* word 8 - tell the FW to abort the IO associated with this
2683 * outstanding exchange ID.
2685 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
2687 /* word 9 - this is the iotag for the abts_wqe completion. */
2688 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
2689 abts_wqeq->iotag);
2692 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
2693 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
2696 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
2697 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
2698 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
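/*
 * Unlike the unsolicited path, this solicited abort is a true
 * ABORT_XRI_CX WQE: abort_tag (word 8) names the XRI of the
 * outstanding command WQE, so the firmware aborts that exchange and
 * then completes this abort request.
 */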
2700 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
2701 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
2702 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
2703 abts_wqeq->iocb_cmpl = 0;
2704 abts_wqeq->iocb_flag |= LPFC_IO_NVME;
2705 abts_wqeq->context2 = ctxp;
2706 abts_wqeq->vport = phba->pport;
2707 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2708 spin_unlock_irqrestore(&phba->hbalock, flags);
2709 if (rc == WQE_SUCCESS) {
2710 atomic_inc(&tgtp->xmt_abort_sol);
2711 return 0;
2712 }
2714 atomic_inc(&tgtp->xmt_abort_rsp_error);
2715 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2716 lpfc_sli_release_iocbq(phba, abts_wqeq);
2717 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2718 "6166 Failed ABORT issue_wqe with status x%x "
2726 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
2727 struct lpfc_nvmet_rcv_ctx *ctxp,
2728 uint32_t sid, uint16_t xri)
2729 {
2730 struct lpfc_nvmet_tgtport *tgtp;
2731 struct lpfc_iocbq *abts_wqeq;
2732 unsigned long flags;
2733 int rc;
2735 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2736 if (!ctxp->wqeq) {
2737 ctxp->wqeq = ctxp->ctxbuf->iocbq;
2738 ctxp->wqeq->hba_wqidx = 0;
2739 }
2741 if (ctxp->state == LPFC_NVMET_STE_FREE) {
2742 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2743 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
2744 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2745 rc = WQE_BUSY;
2746 goto aerr;
2747 }
2748 ctxp->state = LPFC_NVMET_STE_ABORT;
2749 ctxp->entry_cnt++;
2750 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
2751 if (rc == 0)
2752 goto aerr;
2754 spin_lock_irqsave(&phba->hbalock, flags);
2755 abts_wqeq = ctxp->wqeq;
2756 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
2757 abts_wqeq->iocb_cmpl = NULL;
2758 abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
2759 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2760 spin_unlock_irqrestore(&phba->hbalock, flags);
2761 if (rc == WQE_SUCCESS) {
2762 return 0;
2763 }
2765 aerr:
2766 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2767 atomic_inc(&tgtp->xmt_abort_rsp_error);
2768 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2769 "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
2775 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
2776 struct lpfc_nvmet_rcv_ctx *ctxp,
2777 uint32_t sid, uint16_t xri)
2778 {
2779 struct lpfc_nvmet_tgtport *tgtp;
2780 struct lpfc_iocbq *abts_wqeq;
2781 union lpfc_wqe *wqe_abts;
2782 unsigned long flags;
2783 int rc;
2785 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
2786 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
2787 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
2788 ctxp->entry_cnt++;
2789 } else {
2790 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2791 "6418 NVMET LS abort state mismatch "
2793 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2794 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
2795 }
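/*
 * LS aborts have no receive-context buffer, so a fresh iocbq is
 * allocated below; lpfc_nvmet_xmt_ls_abort_cmp later releases that
 * iocbq and frees the rcv_ctx itself.
 */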
2797 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2799 /* Issue ABTS for this WQE based on iotag */
2800 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
2801 if (!ctxp->wqeq) {
2802 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2803 "6068 Abort failed: No wqeqs: "
2804 "xri: x%x\n", xri);
2805 /* No failure to an ABTS request. */
2806 return 0;
2807 }
2810 abts_wqeq = ctxp->wqeq;
2811 wqe_abts = &abts_wqeq->wqe;
2813 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
2814 rc = WQE_BUSY;
2815 goto out;
2816 }
2818 spin_lock_irqsave(&phba->hbalock, flags);
2819 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
2820 abts_wqeq->iocb_cmpl = 0;
2821 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
2822 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
2823 spin_unlock_irqrestore(&phba->hbalock, flags);
2824 if (rc == WQE_SUCCESS) {
2825 atomic_inc(&tgtp->xmt_abort_unsol);
2826 return 0;
2827 }
2828 out:
2829 atomic_inc(&tgtp->xmt_abort_rsp_error);
2830 abts_wqeq->context2 = NULL;
2831 abts_wqeq->context3 = NULL;
2832 lpfc_sli_release_iocbq(phba, abts_wqeq);
2833 ctxp->wqeq = NULL;
2834 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2835 "6056 Failed to Issue ABTS. Status x%x\n", rc);