/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

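/* Protection operation names, indexed by the scsi_get_prot_op() values */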
static char *dif_op_str[] = {
        "PROT_NORMAL",
        "PROT_READ_INSERT",
        "PROT_WRITE_STRIP",
        "PROT_READ_STRIP",
        "PROT_WRITE_INSERT",
        "PROT_READ_PASS",
        "PROT_WRITE_PASS",
};

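/*
 * The 8-byte T10 DIF tuple that accompanies each protected data block:
 * a guard (CRC or IP checksum), an opaque application tag, and a
 * reference tag that normally carries the low 32 bits of the LBA.
 */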
struct scsi_dif_tuple {
        __be16 guard_tag;       /* Checksum */
        __be16 app_tag;         /* Opaque storage */
        __be32 ref_tag;         /* Target LBA or indirect LBA */
};

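/*
 * Look up the remote-port data for a scsi_device. When the HBA has the
 * fof/OAS feature configured (cfg_fof), sdev->hostdata points to an
 * lpfc_device_data wrapper rather than directly to the rport data.
 */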
static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

        if (vport->phba->cfg_fof)
                return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
        else
                return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
        return sc->device->sector_size;
}

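/*
 * lpfc_cmd_protect - report whether guard/ref checking applies to a command.
 * Checking is currently enabled unconditionally for both
 * LPFC_CHECK_PROTECT_GUARD and LPFC_CHECK_PROTECT_REF; the @sc and @flag
 * arguments are presently unused.
 */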
#define LPFC_CHECK_PROTECT_GUARD        1
#define LPFC_CHECK_PROTECT_REF          2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
        return 1;
}

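/*
 * lpfc_cmd_guard_csum - return 1 when the host guard type is an IP
 * checksum (SHOST_DIX_GUARD_IP) and the command actually carries
 * protection data; return 0 for unprotected I/O or T10 CRC guards.
 */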
static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
        if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
                return 0;
        if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
                return 1;
        return 0;
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
                                struct lpfc_io_buf *lpfc_cmd)
{
        struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

        if (sgl) {
                sgl += 1;
                sgl->word2 = le32_to_cpu(sgl->word2);
                bf_set(lpfc_sli4_sge_last, sgl, 1);
                sgl->word2 = cpu_to_le32(sgl->word2);
        }
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        unsigned long flags;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        unsigned long latency;
        int i;

        if (!vport->stat_data_enabled ||
            vport->stat_data_blocked ||
            (cmd->result))
                return;

        latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
        rdata = lpfc_cmd->rdata;
        pnode = rdata->pnode;

        spin_lock_irqsave(shost->host_lock, flags);
        if (!pnode ||
            !pnode->lat_data ||
            (phba->bucket_type == LPFC_NO_BUCKET)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return;
        }

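        /*
         * Pick a latency histogram bucket. Linear buckets are
         * bucket_step ms wide starting at bucket_base; for example,
         * with bucket_base = 0 and bucket_step = 10, a 25 ms latency
         * lands in bucket (25 + 10 - 1 - 0) / 10 = 3. Exponential
         * buckets double in width with each step.
         */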
        if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
                i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
                        phba->bucket_step;
                /* check array subscript bounds */
                if (i < 0)
                        i = 0;
                else if (i >= LPFC_MAX_BUCKET_COUNT)
                        i = LPFC_MAX_BUCKET_COUNT - 1;
        } else {
                for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
                        if (latency <= (phba->bucket_base +
                                ((1<<i)*phba->bucket_step)))
                                break;
        }

        pnode->lat_data[i].cmd_count++;
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most
 * one event each second, and wakes up the worker thread of @phba to
 * process the event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
        unsigned long flags;
        uint32_t evt_posted;
        unsigned long expires;

        spin_lock_irqsave(&phba->hbalock, flags);
        atomic_inc(&phba->num_rsrc_err);
        phba->last_rsrc_error_time = jiffies;

        expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
        if (time_after(expires, jiffies)) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }

        phba->last_ramp_down_time = jiffies;

        spin_unlock_irqrestore(&phba->hbalock, flags);

        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
        evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
        if (!evt_posted)
                phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

        if (!evt_posted)
                lpfc_worker_wake_up(phba);
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for
 * the worker thread. It reduces the queue depth for all scsi devices on
 * each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host  *shost;
        struct scsi_device *sdev;
        unsigned long new_queue_depth;
        unsigned long num_rsrc_err;
        int i;

        num_rsrc_err = atomic_read(&phba->num_rsrc_err);

        /*
         * The error and success command counters are global per
         * driver instance.  If another handler has already
         * operated on this error event, just exit.
         */
        if (num_rsrc_err == 0)
                return;

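        /*
         * Lower each device's queue depth by the number of resource
         * errors seen since the last ramp-down, with a floor of 1;
         * e.g. a depth of 32 with 3 errors becomes 29.
         */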
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                if (num_rsrc_err >= sdev->queue_depth)
                                        new_queue_depth = 1;
                                else
                                        new_queue_depth = sdev->queue_depth -
                                                num_rsrc_err;
                                scsi_change_queue_depth(sdev, new_queue_depth);
                        }
                }
        lpfc_destroy_vport_work_array(phba, vports);
        atomic_set(&phba->num_rsrc_err, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block
 * state by invoking fc_remote_port_delete() on every remote port. It is
 * invoked by EEH when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host  *shost;
        struct scsi_device *sdev;
        struct fc_rport *rport;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                rport = starget_to_rport(scsi_target(sdev));
                                fc_remote_port_delete(rport);
                        }
                }
        lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for a device with SLI-3 interface
 * spec; the scsi buffer contains all the necessary information needed to
 * initiate a SCSI I/O. The non-DMAable buffer region contains information
 * to build the IOCB. The DMAable region contains memory for the FCP CMND,
 * FCP RSP, and the initial BPL. In addition to allocating memory, the FCP
 * CMND and FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in
 * the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_io_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_cmd;
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_sgl;
        uint16_t iotag;
        int bcnt, bpl_size;

        bpl_size = phba->cfg_sg_dma_buf_size -
                (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
                         num_to_alloc, phba->cfg_sg_dma_buf_size,
                         (int)sizeof(struct fcp_cmnd),
                         (int)sizeof(struct fcp_rsp), bpl_size);

        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
                if (!psb)
                        break;

                /*
                 * Get memory from the pci pool to map the virt space to pci
                 * bus space for an I/O.  The DMA buffer includes space for the
                 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
                 * necessary to support the sg_tablesize.
                 */
                psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
                                        GFP_KERNEL, &psb->dma_handle);
                if (!psb->data) {
                        kfree(psb);
                        break;
                }

                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                                      psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
                }
                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

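                /*
                 * Carve up the DMA buffer: the fcp_cmnd comes first,
                 * the fcp_rsp next, then the BPL that will hold the
                 * scatter-gather BDEs:
                 *
                 *   psb->data: [ fcp_cmnd | fcp_rsp | BPL entries ... ]
                 */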
                psb->fcp_cmnd = psb->data;
                psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
                psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp);

                /* Initialize local short-hand pointers. */
                bpl = (struct ulp_bde64 *)psb->dma_sgl;
                pdma_phys_fcp_cmd = psb->dma_handle;
                pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
                pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp);

                /*
                 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
                 * are sg list bdes.  Initialize the first two and leave the
                 * rest for queuecommand.
                 */
                bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
                bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
                bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
                bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

                /* Setup the physical region for the FCP RSP */
                bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
                bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
                bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
                bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

                /*
                 * Since the IOCB for the FCP I/O is built into this
                 * lpfc_scsi_buf, initialize it with all known data now.
                 */
                iocb = &psb->cur_iocbq.iocb;
                iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
                if ((phba->sli_rev == 3) &&
                                !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
                        /* fill in immediate fcp command BDE */
                        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
                        iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
                        iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
                                        unsli3.fcp_ext.icd);
                        iocb->un.fcpi64.bdl.addrHigh = 0;
                        iocb->ulpBdeCount = 0;
                        iocb->ulpLe = 0;
                        /* fill in response BDE */
                        iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
                                                        BUFF_TYPE_BDE_64;
                        iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
                                sizeof(struct fcp_rsp);
                        iocb->unsli3.fcp_ext.rbde.addrLow =
                                putPaddrLow(pdma_phys_fcp_rsp);
                        iocb->unsli3.fcp_ext.rbde.addrHigh =
                                putPaddrHigh(pdma_phys_fcp_rsp);
                } else {
                        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
                        iocb->un.fcpi64.bdl.bdeSize =
                                        (2 * sizeof(struct ulp_bde64));
                        iocb->un.fcpi64.bdl.addrLow =
                                        putPaddrLow(pdma_phys_sgl);
                        iocb->un.fcpi64.bdl.addrHigh =
                                        putPaddrHigh(pdma_phys_sgl);
                        iocb->ulpBdeCount = 1;
                        iocb->ulpLe = 1;
                }
                iocb->ulpClass = CLASS3;
                psb->status = IOSTAT_SUCCESS;
                /* Put it back into the SCSI buffer list */
                psb->cur_iocbq.context1 = psb;
                spin_lock_init(&psb->buf_lock);
                lpfc_release_scsi_buf_s3(phba, psb);
        }

        return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_io_buf *psb, *next_psb;
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag = 0;
        int idx;

        if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
                return;

        spin_lock_irqsave(&phba->hbalock, iflag);
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                qp = &phba->sli4_hba.hdwq[idx];

                spin_lock(&qp->abts_io_buf_list_lock);
                list_for_each_entry_safe(psb, next_psb,
                                         &qp->lpfc_abts_io_buf_list, list) {
                        if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
                                continue;

                        if (psb->rdata && psb->rdata->pnode &&
                            psb->rdata->pnode->vport == vport)
                                psb->rdata = NULL;
                }
                spin_unlock(&qp->abts_io_buf_list_lock);
        }
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into the hba hardware queue array.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                         struct sli4_wcqe_xri_aborted *axri, int idx)
{
        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
        uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
        struct lpfc_io_buf *psb, *next_psb;
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag = 0;
        struct lpfc_iocbq *iocbq;
        int i;
        struct lpfc_nodelist *ndlp;
        int rrq_empty = 0;
        struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
                return;

        qp = &phba->sli4_hba.hdwq[idx];
        spin_lock_irqsave(&phba->hbalock, iflag);
        spin_lock(&qp->abts_io_buf_list_lock);
        list_for_each_entry_safe(psb, next_psb,
                &qp->lpfc_abts_io_buf_list, list) {
                if (psb->cur_iocbq.sli4_xritag == xri) {
                        list_del_init(&psb->list);
                        psb->flags &= ~LPFC_SBUF_XBUSY;
                        psb->status = IOSTAT_SUCCESS;
                        if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
                                qp->abts_nvme_io_bufs--;
                                spin_unlock(&qp->abts_io_buf_list_lock);
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
                                return;
                        }
                        qp->abts_scsi_io_bufs--;
                        spin_unlock(&qp->abts_io_buf_list_lock);

                        if (psb->rdata && psb->rdata->pnode)
                                ndlp = psb->rdata->pnode;
                        else
                                ndlp = NULL;

                        rrq_empty = list_empty(&phba->active_rrq_list);
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
                        if (ndlp) {
                                lpfc_set_rrq_active(phba, ndlp,
                                        psb->cur_iocbq.sli4_lxritag, rxid, 1);
                                lpfc_sli4_abts_err_handler(phba, ndlp, axri);
                        }
                        lpfc_release_scsi_buf_s4(phba, psb);
                        if (rrq_empty)
                                lpfc_worker_wake_up(phba);
                        return;
                }
        }
        spin_unlock(&qp->abts_io_buf_list_lock);
        for (i = 1; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
                    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
                        continue;
                if (iocbq->sli4_xritag != xri)
                        continue;
                psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
                psb->flags &= ~LPFC_SBUF_XBUSY;
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                if (!list_empty(&pring->txq))
                        lpfc_worker_wake_up(phba);
                return;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the node representing the remote port.
 * @cmnd: SCSI command being issued (unused by the SLI-3 path).
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     struct scsi_cmnd *cmnd)
{
        struct lpfc_io_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
        list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
                         list);
        if (!lpfc_cmd) {
                spin_lock(&phba->scsi_buf_list_put_lock);
                list_splice(&phba->lpfc_scsi_buf_list_put,
                            &phba->lpfc_scsi_buf_list_get);
                INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
                list_remove_head(scsi_buf_list_get, lpfc_cmd,
                                 struct lpfc_io_buf, list);
                spin_unlock(&phba->scsi_buf_list_put_lock);
        }
        spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

        if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
                atomic_inc(&ndlp->cmd_pending);
                lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
        }
        return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the node representing the remote port.
 * @cmnd: SCSI command being issued, used to pick the hardware queue.
 *
 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     struct scsi_cmnd *cmnd)
{
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_sli4_hdw_queue *qp;
        struct sli4_sge *sgl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_fcp_cmd;
        uint32_t cpu, idx;
        int tag;
        struct fcp_cmd_rsp_buf *tmp = NULL;

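        /*
         * Choose the hardware queue: when I/Os are scheduled by hdwq,
         * derive it from the block layer's hardware context for this
         * request; otherwise use the queue mapped to the current CPU.
         */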
        cpu = raw_smp_processor_id();
        if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
                tag = blk_mq_unique_tag(cmnd->request);
                idx = blk_mq_unique_tag_to_hwq(tag);
        } else {
                idx = phba->sli4_hba.cpu_map[cpu].hdwq;
        }

        lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
                                   !phba->cfg_xri_rebalancing);
        if (!lpfc_cmd) {
                qp = &phba->sli4_hba.hdwq[idx];
                qp->empty_io_bufs++;
                return NULL;
        }

        /* Setup key fields in buffer that may have been changed
         * if other protocols used this buffer.
         */
        lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
        lpfc_cmd->prot_seg_cnt = 0;
        lpfc_cmd->seg_cnt = 0;
        lpfc_cmd->timeout = 0;
        lpfc_cmd->flags = 0;
        lpfc_cmd->start_time = jiffies;
        lpfc_cmd->waitq = NULL;
        lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        lpfc_cmd->prot_data_type = 0;
#endif
        tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
        if (!tmp) {
                lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
                return NULL;
        }

        lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
        lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

        /*
         * The first two SGEs are the FCP_CMD and FCP_RSP.
         * The balance are sg list bdes. Initialize the
         * first two and leave the rest for queuecommand.
         */
        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
        pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
        sgl->word2 = le32_to_cpu(sgl->word2);
        bf_set(lpfc_sli4_sge_last, sgl, 0);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
        sgl++;

        /* Setup the physical region for the FCP RSP */
        pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
        sgl->word2 = le32_to_cpu(sgl->word2);
        bf_set(lpfc_sli4_sge_last, sgl, 1);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

        /*
         * Since the IOCB for the FCP I/O is built into this
         * lpfc_io_buf, initialize it with all known data now.
         */
        iocb = &lpfc_cmd->cur_iocbq.iocb;
        iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
        /* Setting the BLP size to 2 * sizeof BDE may not be correct.
         * We are setting the bpl to point to our sgl. An sgl's
         * entries are 16 bytes, while bpl entries are 12 bytes.
         */
        iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
        iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
        iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
        iocb->ulpBdeCount = 1;
        iocb->ulpLe = 1;
        iocb->ulpClass = CLASS3;

        if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
                atomic_inc(&ndlp->cmd_pending);
                lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
        }
        return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the node representing the remote port.
 * @cmnd: SCSI command being issued.
 *
 * This routine dispatches to the SLI-rev specific allocator to remove a
 * scsi buffer from the head of the free list and return it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                  struct scsi_cmnd *cmnd)
{
        return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list_put list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
        unsigned long iflag = 0;

        psb->seg_cnt = 0;
        psb->prot_seg_cnt = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
        psb->pCmd = NULL;
        psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
        spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @hdwq io_buf_list. For SLI4, XRIs are tied to the scsi buffer, so an
 * aborted buffer is parked on the abort list instead and cannot be reused
 * for at least RA_TOV.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag = 0;

        psb->seg_cnt = 0;
        psb->prot_seg_cnt = 0;

        qp = psb->hdwq;
        if (psb->flags & LPFC_SBUF_XBUSY) {
                spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
                psb->pCmd = NULL;
                list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
                qp->abts_scsi_io_bufs++;
                spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
        } else {
                lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
        }
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine drops any queue-depth accounting taken when the buffer was
 * allocated, then dispatches to the SLI-rev specific release handler.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
        if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
                atomic_dec(&psb->ndlp->cmd_pending);

        psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
        phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with SLI-3 interface spec. It
 * scans through the sg elements and formats the BDEs, and also initializes
 * all IOCB fields which are dependent on the scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
        struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
        dma_addr_t physaddr;
        uint32_t num_bde = 0;
        int nseg, datadir = scsi_cmnd->sc_data_direction;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        bpl += 2;
        if (scsi_sg_count(scsi_cmnd)) {
                /*
                 * The driver stores the segment count returned from pci_map_sg
                 * because this is a count of dma-mappings used to map the
                 * use_sg pages.  They are not guaranteed to be the same for
                 * those architectures that implement an IOMMU.
                 */

                nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
                                  scsi_sg_count(scsi_cmnd), datadir);
                if (unlikely(!nseg))
                        return 1;

                lpfc_cmd->seg_cnt = nseg;
                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "9064 BLKGRD: %s: Too many sg segments"
                                        " from dma_map_sg.  Config %d, seg_cnt"
                                        " %d\n", __func__, phba->cfg_sg_seg_cnt,
                                        lpfc_cmd->seg_cnt);
                        WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
                        lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 2;
                }

                /*
                 * The driver established a maximum scatter-gather segment count
                 * during probe that limits the number of sg elements in any
                 * single scsi command.  Just run through the seg_cnt and format
                 * the bde's.
                 * When using SLI-3 the driver will try to fit all the BDEs into
                 * the IOCB. If it can't then the BDEs get added to a BPL as it
                 * does for SLI-2 mode.
                 */
                scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
                        physaddr = sg_dma_address(sgel);
                        if (phba->sli_rev == 3 &&
                            !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
                            !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
                            nseg <= LPFC_EXT_DATA_BDE_COUNT) {
                                data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                                data_bde->tus.f.bdeSize = sg_dma_len(sgel);
                                data_bde->addrLow = putPaddrLow(physaddr);
                                data_bde->addrHigh = putPaddrHigh(physaddr);
                                data_bde++;
                        } else {
                                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                                bpl->tus.w = le32_to_cpu(bpl->tus.w);
                                bpl->addrLow =
                                        le32_to_cpu(putPaddrLow(physaddr));
                                bpl->addrHigh =
                                        le32_to_cpu(putPaddrHigh(physaddr));
                                bpl++;
                        }
                }
        }

        /*
         * Finish initializing those IOCB fields that are dependent on the
         * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
         * explicitly reinitialized and for SLI-3 the extended bde count is
         * explicitly reinitialized since all iocb memory resources are reused.
         */
        if (phba->sli_rev == 3 &&
            !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
            !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
                if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
                        /*
                         * The extended IOCB format can only fit 3 BDE or a BPL.
                         * This I/O has more than 3 BDE so the 1st data bde will
                         * be a BPL that is filled in here.
                         */
                        physaddr = lpfc_cmd->dma_handle;
                        data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
                        data_bde->tus.f.bdeSize = (num_bde *
                                                   sizeof(struct ulp_bde64));
                        physaddr += (sizeof(struct fcp_cmnd) +
                                     sizeof(struct fcp_rsp) +
                                     (2 * sizeof(struct ulp_bde64)));
                        data_bde->addrHigh = putPaddrHigh(physaddr);
                        data_bde->addrLow = putPaddrLow(physaddr);
                        /* ebde count includes the response bde and data bpl */
                        iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
                } else {
                        /* ebde count includes the response bde and data bdes */
                        iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
                }
        } else {
                iocb_cmd->un.fcpi64.bdl.bdeSize =
                        ((num_bde + 2) * sizeof(struct ulp_bde64));
                iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
        }
        fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

        /*
         * Due to difference in data length between DIF/non-DIF paths,
         * we need to set word 4 of IOCB here
         */
        iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
        return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT     0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT      0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP     0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK    0x20
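
/*
 * These values form a bit mask, so an injection can request several
 * behaviors at once; a reftag injection on a WRITE_PASS, for instance,
 * returns BG_ERR_TGT | BG_ERR_CHECK so that checking is disabled and
 * the corrupted tag travels all the way to the target.
 */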

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
978 static int
979 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
980                 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
981 {
982         struct scatterlist *sgpe; /* s/g prot entry */
983         struct lpfc_io_buf *lpfc_cmd = NULL;
984         struct scsi_dif_tuple *src = NULL;
985         struct lpfc_nodelist *ndlp;
986         struct lpfc_rport_data *rdata;
987         uint32_t op = scsi_get_prot_op(sc);
988         uint32_t blksize;
989         uint32_t numblks;
990         sector_t lba;
991         int rc = 0;
992         int blockoff = 0;
993
994         if (op == SCSI_PROT_NORMAL)
995                 return 0;
996
997         sgpe = scsi_prot_sglist(sc);
998         lba = scsi_get_lba(sc);
999
1000         /* First check if we need to match the LBA */
1001         if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1002                 blksize = lpfc_cmd_blksize(sc);
1003                 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1004
1005                 /* Make sure we have the right LBA if one is specified */
1006                 if ((phba->lpfc_injerr_lba < lba) ||
1007                         (phba->lpfc_injerr_lba >= (lba + numblks)))
1008                         return 0;
1009                 if (sgpe) {
1010                         blockoff = phba->lpfc_injerr_lba - lba;
1011                         numblks = sg_dma_len(sgpe) /
1012                                 sizeof(struct scsi_dif_tuple);
1013                         if (numblks < blockoff)
1014                                 blockoff = numblks;
1015                 }
1016         }
1017
1018         /* Next check if we need to match the remote NPortID or WWPN */
1019         rdata = lpfc_rport_data_from_scsi_device(sc->device);
1020         if (rdata && rdata->pnode) {
1021                 ndlp = rdata->pnode;
1022
1023                 /* Make sure we have the right NPortID if one is specified */
1024                 if (phba->lpfc_injerr_nportid  &&
1025                         (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1026                         return 0;
1027
1028                 /*
1029                  * Make sure we have the right WWPN if one is specified.
1030                  * wwn[0] should be a non-zero NAA in a good WWPN.
1031                  */
1032                 if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1033                         (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1034                                 sizeof(struct lpfc_name)) != 0))
1035                         return 0;
1036         }
1037
1038         /* Setup a ptr to the protection data if the SCSI host provides it */
1039         if (sgpe) {
1040                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1041                 src += blockoff;
1042                 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1043         }
1044
1045         /* Should we change the Reference Tag */
1046         if (reftag) {
1047                 if (phba->lpfc_injerr_wref_cnt) {
1048                         switch (op) {
1049                         case SCSI_PROT_WRITE_PASS:
1050                                 if (src) {
1051                                         /*
1052                                          * For WRITE_PASS, force the error
1053                                          * to be sent on the wire. It should
1054                                          * be detected by the Target.
1055                                          * If blockoff != 0 error will be
1056                                          * inserted in middle of the IO.
1057                                          */
1058
1059                                         lpfc_printf_log(phba, KERN_ERR,
1060                                                         LOG_TRACE_EVENT,
1061                                         "9076 BLKGRD: Injecting reftag error: "
1062                                         "write lba x%lx + x%x oldrefTag x%x\n",
1063                                         (unsigned long)lba, blockoff,
1064                                         be32_to_cpu(src->ref_tag));
1065
1066                                         /*
1067                                          * Save the old ref_tag so we can
1068                                          * restore it on completion.
1069                                          */
1070                                         if (lpfc_cmd) {
1071                                                 lpfc_cmd->prot_data_type =
1072                                                         LPFC_INJERR_REFTAG;
1073                                                 lpfc_cmd->prot_data_segment =
1074                                                         src;
1075                                                 lpfc_cmd->prot_data =
1076                                                         src->ref_tag;
1077                                         }
1078                                         src->ref_tag = cpu_to_be32(0xDEADBEEF);
1079                                         phba->lpfc_injerr_wref_cnt--;
1080                                         if (phba->lpfc_injerr_wref_cnt == 0) {
1081                                                 phba->lpfc_injerr_nportid = 0;
1082                                                 phba->lpfc_injerr_lba =
1083                                                         LPFC_INJERR_LBA_OFF;
1084                                                 memset(&phba->lpfc_injerr_wwpn,
1085                                                   0, sizeof(struct lpfc_name));
1086                                         }
1087                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1088
1089                                         break;
1090                                 }
1091                                 fallthrough;
1092                         case SCSI_PROT_WRITE_INSERT:
1093                                 /*
1094                                  * For WRITE_INSERT, force the error
1095                                  * to be sent on the wire. It should be
1096                                  * detected by the Target.
1097                                  */
1098                                 /* DEADBEEF will be the reftag on the wire */
1099                                 *reftag = 0xDEADBEEF;
1100                                 phba->lpfc_injerr_wref_cnt--;
1101                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1102                                         phba->lpfc_injerr_nportid = 0;
1103                                         phba->lpfc_injerr_lba =
1104                                         LPFC_INJERR_LBA_OFF;
1105                                         memset(&phba->lpfc_injerr_wwpn,
1106                                                 0, sizeof(struct lpfc_name));
1107                                 }
1108                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1109
1110                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1111                                         "9078 BLKGRD: Injecting reftag error: "
1112                                         "write lba x%lx\n", (unsigned long)lba);
1113                                 break;
1114                         case SCSI_PROT_WRITE_STRIP:
1115                                 /*
1116                                  * For WRITE_STRIP and WRITE_PASS,
1117                                  * force the error on data
1118                                  * being copied from SLI-Host to SLI-Port.
1119                                  */
1120                                 *reftag = 0xDEADBEEF;
1121                                 phba->lpfc_injerr_wref_cnt--;
1122                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1123                                         phba->lpfc_injerr_nportid = 0;
1124                                         phba->lpfc_injerr_lba =
1125                                                 LPFC_INJERR_LBA_OFF;
1126                                         memset(&phba->lpfc_injerr_wwpn,
1127                                                 0, sizeof(struct lpfc_name));
1128                                 }
1129                                 rc = BG_ERR_INIT;
1130
1131                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1132                                         "9077 BLKGRD: Injecting reftag error: "
1133                                         "write lba x%lx\n", (unsigned long)lba);
1134                                 break;
1135                         }
1136                 }
1137                 if (phba->lpfc_injerr_rref_cnt) {
1138                         switch (op) {
1139                         case SCSI_PROT_READ_INSERT:
1140                         case SCSI_PROT_READ_STRIP:
1141                         case SCSI_PROT_READ_PASS:
1142                                 /*
1143                                  * For READ_STRIP and READ_PASS, force the
1144                                  * error on data being read off the wire. It
1145                                  * should force an IO error to the driver.
1146                                  */
1147                                 *reftag = 0xDEADBEEF;
1148                                 phba->lpfc_injerr_rref_cnt--;
1149                                 if (phba->lpfc_injerr_rref_cnt == 0) {
1150                                         phba->lpfc_injerr_nportid = 0;
1151                                         phba->lpfc_injerr_lba =
1152                                                 LPFC_INJERR_LBA_OFF;
1153                                         memset(&phba->lpfc_injerr_wwpn,
1154                                                 0, sizeof(struct lpfc_name));
1155                                 }
1156                                 rc = BG_ERR_INIT;
1157
1158                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1159                                         "9079 BLKGRD: Injecting reftag error: "
1160                                         "read lba x%lx\n", (unsigned long)lba);
1161                                 break;
1162                         }
1163                 }
1164         }
1165
1166         /* Should we change the Application Tag */
1167         if (apptag) {
1168                 if (phba->lpfc_injerr_wapp_cnt) {
1169                         switch (op) {
1170                         case SCSI_PROT_WRITE_PASS:
1171                                 if (src) {
1172                                         /*
1173                                          * For WRITE_PASS, force the error
1174                                          * to be sent on the wire. It should
1175                                          * be detected by the Target.
1176                                          * If blockoff != 0 error will be
1177                                          * inserted in middle of the IO.
1178                                          */
1179
1180                                         lpfc_printf_log(phba, KERN_ERR,
1181                                                         LOG_TRACE_EVENT,
1182                                         "9080 BLKGRD: Injecting apptag error: "
1183                                         "write lba x%lx + x%x oldappTag x%x\n",
1184                                         (unsigned long)lba, blockoff,
1185                                         be16_to_cpu(src->app_tag));
1186
1187                                         /*
1188                                          * Save the old app_tag so we can
1189                                          * restore it on completion.
1190                                          */
1191                                         if (lpfc_cmd) {
1192                                                 lpfc_cmd->prot_data_type =
1193                                                         LPFC_INJERR_APPTAG;
1194                                                 lpfc_cmd->prot_data_segment =
1195                                                         src;
1196                                                 lpfc_cmd->prot_data =
1197                                                         src->app_tag;
1198                                         }
1199                                         src->app_tag = cpu_to_be16(0xDEAD);
1200                                         phba->lpfc_injerr_wapp_cnt--;
1201                                         if (phba->lpfc_injerr_wapp_cnt == 0) {
1202                                                 phba->lpfc_injerr_nportid = 0;
1203                                                 phba->lpfc_injerr_lba =
1204                                                         LPFC_INJERR_LBA_OFF;
1205                                                 memset(&phba->lpfc_injerr_wwpn,
1206                                                   0, sizeof(struct lpfc_name));
1207                                         }
1208                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1209                                         break;
1210                                 }
1211                                 fallthrough;
1212                         case SCSI_PROT_WRITE_INSERT:
1213                                 /*
1214                                  * For WRITE_INSERT, force the
1215                                  * error to be sent on the wire. It should be
1216                                  * detected by the Target.
1217                                  */
1218                                 /* DEAD will be the apptag on the wire */
1219                                 *apptag = 0xDEAD;
1220                                 phba->lpfc_injerr_wapp_cnt--;
1221                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1222                                         phba->lpfc_injerr_nportid = 0;
1223                                         phba->lpfc_injerr_lba =
1224                                                 LPFC_INJERR_LBA_OFF;
1225                                         memset(&phba->lpfc_injerr_wwpn,
1226                                                 0, sizeof(struct lpfc_name));
1227                                 }
1228                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1229
1230                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1231                                         "0813 BLKGRD: Injecting apptag error: "
1232                                         "write lba x%lx\n", (unsigned long)lba);
1233                                 break;
1234                         case SCSI_PROT_WRITE_STRIP:
1235                                 /*
1236                                  * For WRITE_STRIP, force the error on
1237                                  * the data being copied from the
1238                                  * SLI-Host to the SLI-Port.
1239                                  */
1240                                 *apptag = 0xDEAD;
1241                                 phba->lpfc_injerr_wapp_cnt--;
1242                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1243                                         phba->lpfc_injerr_nportid = 0;
1244                                         phba->lpfc_injerr_lba =
1245                                                 LPFC_INJERR_LBA_OFF;
1246                                         memset(&phba->lpfc_injerr_wwpn,
1247                                                 0, sizeof(struct lpfc_name));
1248                                 }
1249                                 rc = BG_ERR_INIT;
1250
1251                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1252                                         "0812 BLKGRD: Injecting apptag error: "
1253                                         "write lba x%lx\n", (unsigned long)lba);
1254                                 break;
1255                         }
1256                 }
1257                 if (phba->lpfc_injerr_rapp_cnt) {
1258                         switch (op) {
1259                         case SCSI_PROT_READ_INSERT:
1260                         case SCSI_PROT_READ_STRIP:
1261                         case SCSI_PROT_READ_PASS:
1262                                 /*
1263                                  * For the READ cases, force the
1264                                  * error on data being read off the wire. It
1265                                  * should force an IO error to the driver.
1266                                  */
1267                                 *apptag = 0xDEAD;
1268                                 phba->lpfc_injerr_rapp_cnt--;
1269                                 if (phba->lpfc_injerr_rapp_cnt == 0) {
1270                                         phba->lpfc_injerr_nportid = 0;
1271                                         phba->lpfc_injerr_lba =
1272                                                 LPFC_INJERR_LBA_OFF;
1273                                         memset(&phba->lpfc_injerr_wwpn,
1274                                                 0, sizeof(struct lpfc_name));
1275                                 }
1276                                 rc = BG_ERR_INIT;
1277
1278                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1279                                         "0814 BLKGRD: Injecting apptag error: "
1280                                         "read lba x%lx\n", (unsigned long)lba);
1281                                 break;
1282                         }
1283                 }
1284         }
1285
1286
1287         /* Should we change the Guard Tag */
1288         if (new_guard) {
1289                 if (phba->lpfc_injerr_wgrd_cnt) {
1290                         switch (op) {
1291                         case SCSI_PROT_WRITE_PASS:
1292                                 rc = BG_ERR_CHECK;
1293                                 fallthrough;
1294
1295                         case SCSI_PROT_WRITE_INSERT:
1296                                 /*
1297                                  * For WRITE_INSERT, force the
1298                                  * error to be sent on the wire. It should be
1299                                  * detected by the Target.
1300                                  */
1301                                 phba->lpfc_injerr_wgrd_cnt--;
1302                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1303                                         phba->lpfc_injerr_nportid = 0;
1304                                         phba->lpfc_injerr_lba =
1305                                                 LPFC_INJERR_LBA_OFF;
1306                                         memset(&phba->lpfc_injerr_wwpn,
1307                                                 0, sizeof(struct lpfc_name));
1308                                 }
1309
1310                                 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1311                                 /* Signals the caller to swap CRC->CSUM */
1312
1313                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1314                                         "0817 BLKGRD: Injecting guard error: "
1315                                         "write lba x%lx\n", (unsigned long)lba);
1316                                 break;
1317                         case SCSI_PROT_WRITE_STRIP:
1318                                 /*
1319                                  * For WRITE_STRIP, force the error on
1320                                  * the data being copied from the
1321                                  * SLI-Host to the SLI-Port.
1322                                  */
1323                                 phba->lpfc_injerr_wgrd_cnt--;
1324                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1325                                         phba->lpfc_injerr_nportid = 0;
1326                                         phba->lpfc_injerr_lba =
1327                                                 LPFC_INJERR_LBA_OFF;
1328                                         memset(&phba->lpfc_injerr_wwpn,
1329                                                 0, sizeof(struct lpfc_name));
1330                                 }
1331
1332                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1333                                 /* Signals the caller to swap CRC->CSUM */
1334
1335                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1336                                         "0816 BLKGRD: Injecting guard error: "
1337                                         "write lba x%lx\n", (unsigned long)lba);
1338                                 break;
1339                         }
1340                 }
1341                 if (phba->lpfc_injerr_rgrd_cnt) {
1342                         switch (op) {
1343                         case SCSI_PROT_READ_INSERT:
1344                         case SCSI_PROT_READ_STRIP:
1345                         case SCSI_PROT_READ_PASS:
1346                                 /*
1347                                  * For the READ cases, force the
1348                                  * error on data being read off the wire. It
1349                                  * should force an IO error to the driver.
1350                                  */
1351                                 phba->lpfc_injerr_rgrd_cnt--;
1352                                 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1353                                         phba->lpfc_injerr_nportid = 0;
1354                                         phba->lpfc_injerr_lba =
1355                                                 LPFC_INJERR_LBA_OFF;
1356                                         memset(&phba->lpfc_injerr_wwpn,
1357                                                 0, sizeof(struct lpfc_name));
1358                                 }
1359
1360                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1361                                 /* Signals the caller to swap CRC->CSUM */
1362
1363                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1364                                         "0818 BLKGRD: Injecting guard error: "
1365                                         "read lba x%lx\n", (unsigned long)lba);
1366                         }
1367                 }
1368         }
1369
1370         return rc;
1371 }
1372 #endif
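
/*
 * Note on the BG_ERR_* flags returned by lpfc_bg_err_inject() above: the
 * BlockGuard setup routines below all consume them the same way,
 *
 *	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 *	if (rc & BG_ERR_SWAP)   ->  swap the CRC/CSUM opcodes via
 *	                            lpfc_bg_err_opcodes()
 *	if (rc & BG_ERR_CHECK)  ->  clear "checking" so the corrupted tag
 *	                            is not caught locally and is detected
 *	                            by the other end of the wire instead
 */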
1373
1374 /**
1375  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1376  * the specified SCSI command.
1377  * @phba: The Hba for which this call is being executed.
1378  * @sc: The SCSI command to examine
1379  * @txop: (out) BlockGuard operation for transmitted data
1380  * @rxop: (out) BlockGuard operation for received data
1381  *
1382  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1383  *
1384  **/
1385 static int
1386 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1387                 uint8_t *txop, uint8_t *rxop)
1388 {
1389         uint8_t ret = 0;
1390
1391         if (lpfc_cmd_guard_csum(sc)) {
1392                 switch (scsi_get_prot_op(sc)) {
1393                 case SCSI_PROT_READ_INSERT:
1394                 case SCSI_PROT_WRITE_STRIP:
1395                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1396                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1397                         break;
1398
1399                 case SCSI_PROT_READ_STRIP:
1400                 case SCSI_PROT_WRITE_INSERT:
1401                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1402                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1403                         break;
1404
1405                 case SCSI_PROT_READ_PASS:
1406                 case SCSI_PROT_WRITE_PASS:
1407                         *rxop = BG_OP_IN_CRC_OUT_CSUM;
1408                         *txop = BG_OP_IN_CSUM_OUT_CRC;
1409                         break;
1410
1411                 case SCSI_PROT_NORMAL:
1412                 default:
1413                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1414                                 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1415                                         scsi_get_prot_op(sc));
1416                         ret = 1;
1417                         break;
1418
1419                 }
1420         } else {
1421                 switch (scsi_get_prot_op(sc)) {
1422                 case SCSI_PROT_READ_STRIP:
1423                 case SCSI_PROT_WRITE_INSERT:
1424                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1425                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1426                         break;
1427
1428                 case SCSI_PROT_READ_PASS:
1429                 case SCSI_PROT_WRITE_PASS:
1430                         *rxop = BG_OP_IN_CRC_OUT_CRC;
1431                         *txop = BG_OP_IN_CRC_OUT_CRC;
1432                         break;
1433
1434                 case SCSI_PROT_READ_INSERT:
1435                 case SCSI_PROT_WRITE_STRIP:
1436                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1437                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1438                         break;
1439
1440                 case SCSI_PROT_NORMAL:
1441                 default:
1442                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1443                                 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1444                                         scsi_get_prot_op(sc));
1445                         ret = 1;
1446                         break;
1447                 }
1448         }
1449
1450         return ret;
1451 }
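
/*
 * For example, with a CRC (T10 DIF) guard the SCSI_PROT_READ_PASS and
 * SCSI_PROT_WRITE_PASS cases above program BG_OP_IN_CRC_OUT_CRC for both
 * rx and tx, i.e. the guard is CRC in both directions and the protection
 * data passes through end to end.
 */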
1452
1453 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1454 /**
1455  * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1456  * the specified SCSI command in order to force a guard tag error.
1457  * @phba: The Hba for which this call is being executed.
1458  * @sc: The SCSI command to examine
1459  * @txop: (out) BlockGuard operation for transmitted data
1460  * @rxop: (out) BlockGuard operation for received data
1461  *
1462  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1463  *
1464  **/
1465 static int
1466 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1467                 uint8_t *txop, uint8_t *rxop)
1468 {
1469         uint8_t ret = 0;
1470
1471         if (lpfc_cmd_guard_csum(sc)) {
1472                 switch (scsi_get_prot_op(sc)) {
1473                 case SCSI_PROT_READ_INSERT:
1474                 case SCSI_PROT_WRITE_STRIP:
1475                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1476                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1477                         break;
1478
1479                 case SCSI_PROT_READ_STRIP:
1480                 case SCSI_PROT_WRITE_INSERT:
1481                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1482                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1483                         break;
1484
1485                 case SCSI_PROT_READ_PASS:
1486                 case SCSI_PROT_WRITE_PASS:
1487                         *rxop = BG_OP_IN_CSUM_OUT_CRC;
1488                         *txop = BG_OP_IN_CRC_OUT_CSUM;
1489                         break;
1490
1491                 case SCSI_PROT_NORMAL:
1492                 default:
1493                         break;
1494
1495                 }
1496         } else {
1497                 switch (scsi_get_prot_op(sc)) {
1498                 case SCSI_PROT_READ_STRIP:
1499                 case SCSI_PROT_WRITE_INSERT:
1500                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1501                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1502                         break;
1503
1504                 case SCSI_PROT_READ_PASS:
1505                 case SCSI_PROT_WRITE_PASS:
1506                         *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1507                         *txop = BG_OP_IN_CSUM_OUT_CSUM;
1508                         break;
1509
1510                 case SCSI_PROT_READ_INSERT:
1511                 case SCSI_PROT_WRITE_STRIP:
1512                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1513                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1514                         break;
1515
1516                 case SCSI_PROT_NORMAL:
1517                 default:
1518                         break;
1519                 }
1520         }
1521
1522         return ret;
1523 }
1524 #endif
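
/*
 * lpfc_bg_err_opcodes() above is the CRC/CSUM mirror image of
 * lpfc_sc_to_bg_opcodes(): programming the swapped guard opcode makes the
 * guard tag the hardware computes disagree with the one in the protection
 * data, which is what forces the injected guard tag error.
 */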
1525
1526 /**
1527  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1528  * @phba: The Hba for which this call is being executed.
1529  * @sc: pointer to scsi command we're working on
1530  * @bpl: pointer to buffer list for protection groups
1531  * @datasegcnt: number of segments of data that have been dma mapped
1532  *
1533  * This function sets up BPL buffer list for protection groups of
1534  * type LPFC_PG_TYPE_NO_DIF
1535  *
1536  * This is usually used when the HBA is instructed to generate
1537  * DIFs and insert them into the data stream (or strip DIFs from
1538  * the incoming data stream)
1539  *
1540  * The buffer list consists of just one protection group described
1541  * below:
1542  *                                +-------------------------+
1543  *   start of prot group  -->     |          PDE_5          |
1544  *                                +-------------------------+
1545  *                                |          PDE_6          |
1546  *                                +-------------------------+
1547  *                                |         Data BDE        |
1548  *                                +-------------------------+
1549  *                                |more Data BDE's ... (opt)|
1550  *                                +-------------------------+
1551  *
1552  *
1553  * Note: Data s/g buffers have been dma mapped
1554  *
1555  * Returns the number of BDEs added to the BPL.
1556  **/
1557 static int
1558 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1559                 struct ulp_bde64 *bpl, int datasegcnt)
1560 {
1561         struct scatterlist *sgde = NULL; /* s/g data entry */
1562         struct lpfc_pde5 *pde5 = NULL;
1563         struct lpfc_pde6 *pde6 = NULL;
1564         dma_addr_t physaddr;
1565         int i = 0, num_bde = 0, status;
1566         int datadir = sc->sc_data_direction;
1567 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1568         uint32_t rc;
1569 #endif
1570         uint32_t checking = 1;
1571         uint32_t reftag;
1572         uint8_t txop, rxop;
1573
1574         status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1575         if (status)
1576                 goto out;
1577
1578         /* extract some info from the scsi command for pde */
1579         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
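	/*
	 * For DIF Type 1 the reference tag is the low 32 bits of the LBA,
	 * e.g. LBA 0x100000001 seeds reftag 0x00000001.
	 */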
1580
1581 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1582         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1583         if (rc) {
1584                 if (rc & BG_ERR_SWAP)
1585                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1586                 if (rc & BG_ERR_CHECK)
1587                         checking = 0;
1588         }
1589 #endif
1590
1591         /* setup PDE5 with what we have */
1592         pde5 = (struct lpfc_pde5 *) bpl;
1593         memset(pde5, 0, sizeof(struct lpfc_pde5));
1594         bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1595
1596         /* Endianness conversion if necessary for PDE5 */
1597         pde5->word0 = cpu_to_le32(pde5->word0);
1598         pde5->reftag = cpu_to_le32(reftag);
1599
1600         /* advance bpl and increment bde count */
1601         num_bde++;
1602         bpl++;
1603         pde6 = (struct lpfc_pde6 *) bpl;
1604
1605         /* setup PDE6 with the rest of the info */
1606         memset(pde6, 0, sizeof(struct lpfc_pde6));
1607         bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1608         bf_set(pde6_optx, pde6, txop);
1609         bf_set(pde6_oprx, pde6, rxop);
1610
1611         /*
1612          * We only need to check the data on READs, for WRITEs
1613          * protection data is automatically generated, not checked.
1614          */
1615         if (datadir == DMA_FROM_DEVICE) {
1616                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1617                         bf_set(pde6_ce, pde6, checking);
1618                 else
1619                         bf_set(pde6_ce, pde6, 0);
1620
1621                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1622                         bf_set(pde6_re, pde6, checking);
1623                 else
1624                         bf_set(pde6_re, pde6, 0);
1625         }
1626         bf_set(pde6_ai, pde6, 1);
1627         bf_set(pde6_ae, pde6, 0);
1628         bf_set(pde6_apptagval, pde6, 0);
1629
1630         /* Endianness conversion if necessary for PDE6 */
1631         pde6->word0 = cpu_to_le32(pde6->word0);
1632         pde6->word1 = cpu_to_le32(pde6->word1);
1633         pde6->word2 = cpu_to_le32(pde6->word2);
1634
1635         /* advance bpl and increment bde count */
1636         num_bde++;
1637         bpl++;
1638
1639         /* assumption: caller has already run dma_map_sg on command data */
1640         scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1641                 physaddr = sg_dma_address(sgde);
1642                 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1643                 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1644                 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1645                 if (datadir == DMA_TO_DEVICE)
1646                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1647                 else
1648                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1649                 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1650                 bpl++;
1651                 num_bde++;
1652         }
1653
1654 out:
1655         return num_bde;
1656 }
1657
1658 /**
1659  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1660  * @phba: The Hba for which this call is being executed.
1661  * @sc: pointer to scsi command we're working on
1662  * @bpl: pointer to buffer list for protection groups
1663  * @datacnt: number of segments of data that have been dma mapped
1664  * @protcnt: number of segments of protection data that have been dma mapped
1665  *
1666  * This function sets up BPL buffer list for protection groups of
1667  * type LPFC_PG_TYPE_DIF
1668  *
1669  * This is usually used when DIFs are in their own buffers,
1670  * separate from the data. The HBA can then be instructed
1671  * to place the DIFs in the outgoing stream.  For read operations,
1672  * the HBA can extract the DIFs and place them in DIF buffers.
1673  *
1674  * The buffer list for this type consists of one or more of the
1675  * protection groups described below:
1676  *                                    +-------------------------+
1677  *   start of first prot group  -->   |          PDE_5          |
1678  *                                    +-------------------------+
1679  *                                    |          PDE_6          |
1680  *                                    +-------------------------+
1681  *                                    |      PDE_7 (Prot BDE)   |
1682  *                                    +-------------------------+
1683  *                                    |        Data BDE         |
1684  *                                    +-------------------------+
1685  *                                    |more Data BDE's ... (opt)|
1686  *                                    +-------------------------+
1687  *   start of new  prot group  -->    |          PDE_5          |
1688  *                                    +-------------------------+
1689  *                                    |          ...            |
1690  *                                    +-------------------------+
1691  *
1692  * Note: It is assumed that both data and protection s/g buffers have been
1693  *       mapped for DMA
1694  *
1695  * Returns the number of BDEs added to the BPL.
1696  **/
1697 static int
1698 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1699                 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1700 {
1701         struct scatterlist *sgde = NULL; /* s/g data entry */
1702         struct scatterlist *sgpe = NULL; /* s/g prot entry */
1703         struct lpfc_pde5 *pde5 = NULL;
1704         struct lpfc_pde6 *pde6 = NULL;
1705         struct lpfc_pde7 *pde7 = NULL;
1706         dma_addr_t dataphysaddr, protphysaddr;
1707         unsigned short curr_data = 0, curr_prot = 0;
1708         unsigned int split_offset;
1709         unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1710         unsigned int protgrp_blks, protgrp_bytes;
1711         unsigned int remainder, subtotal;
1712         int status;
1713         int datadir = sc->sc_data_direction;
1714         unsigned char pgdone = 0, alldone = 0;
1715         unsigned blksize;
1716 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1717         uint32_t rc;
1718 #endif
1719         uint32_t checking = 1;
1720         uint32_t reftag;
1721         uint8_t txop, rxop;
1722         int num_bde = 0;
1723
1724         sgpe = scsi_prot_sglist(sc);
1725         sgde = scsi_sglist(sc);
1726
1727         if (!sgpe || !sgde) {
1728                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1729                                 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1730                                 sgpe, sgde);
1731                 return 0;
1732         }
1733
1734         status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1735         if (status)
1736                 goto out;
1737
1738         /* extract some info from the scsi command */
1739         blksize = lpfc_cmd_blksize(sc);
1740         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1741
1742 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1743         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1744         if (rc) {
1745                 if (rc & BG_ERR_SWAP)
1746                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1747                 if (rc & BG_ERR_CHECK)
1748                         checking = 0;
1749         }
1750 #endif
1751
1752         split_offset = 0;
1753         do {
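		/*
		 * Overflow convention: when BPL entries run out, a count past
		 * phba->cfg_total_seg_cnt is returned so the caller can tell
		 * (a full PDE5/PDE6/PDE7 protection group needs three more
		 * entries, hence the +3 below).
		 */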
1754                 /* Check to see if we ran out of space */
1755                 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1756                         return num_bde + 3;
1757
1758                 /* setup PDE5 with what we have */
1759                 pde5 = (struct lpfc_pde5 *) bpl;
1760                 memset(pde5, 0, sizeof(struct lpfc_pde5));
1761                 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1762
1763                 /* Endianness conversion if necessary for PDE5 */
1764                 pde5->word0 = cpu_to_le32(pde5->word0);
1765                 pde5->reftag = cpu_to_le32(reftag);
1766
1767                 /* advance bpl and increment bde count */
1768                 num_bde++;
1769                 bpl++;
1770                 pde6 = (struct lpfc_pde6 *) bpl;
1771
1772                 /* setup PDE6 with the rest of the info */
1773                 memset(pde6, 0, sizeof(struct lpfc_pde6));
1774                 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1775                 bf_set(pde6_optx, pde6, txop);
1776                 bf_set(pde6_oprx, pde6, rxop);
1777
1778                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1779                         bf_set(pde6_ce, pde6, checking);
1780                 else
1781                         bf_set(pde6_ce, pde6, 0);
1782
1783                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1784                         bf_set(pde6_re, pde6, checking);
1785                 else
1786                         bf_set(pde6_re, pde6, 0);
1787
1788                 bf_set(pde6_ai, pde6, 1);
1789                 bf_set(pde6_ae, pde6, 0);
1790                 bf_set(pde6_apptagval, pde6, 0);
1791
1792                 /* Endianness conversion if necessary for PDE6 */
1793                 pde6->word0 = cpu_to_le32(pde6->word0);
1794                 pde6->word1 = cpu_to_le32(pde6->word1);
1795                 pde6->word2 = cpu_to_le32(pde6->word2);
1796
1797                 /* advance bpl and increment bde count */
1798                 num_bde++;
1799                 bpl++;
1800
1801                 /* setup the first BDE that points to protection buffer */
1802                 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1803                 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1804
1805                 /* must be integer multiple of the DIF block length */
1806                 BUG_ON(protgroup_len % 8);
1807
1808                 pde7 = (struct lpfc_pde7 *) bpl;
1809                 memset(pde7, 0, sizeof(struct lpfc_pde7));
1810                 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1811
1812                 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1813                 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1814
1815                 protgrp_blks = protgroup_len / 8;
1816                 protgrp_bytes = protgrp_blks * blksize;
1817
1818                 /* check if this pde is crossing the 4K boundary; if so split */
1819                 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1820                         protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1821                         protgroup_offset += protgroup_remainder;
1822                         protgrp_blks = protgroup_remainder / 8;
1823                         protgrp_bytes = protgrp_blks * blksize;
1824                 } else {
1825                         protgroup_offset = 0;
1826                         curr_prot++;
1827                 }
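		/*
		 * Example: a protection buffer at page offset 0xf80 with
		 * protgroup_len 0x100 crosses 4K, so only 0x80 bytes (16
		 * eight-byte DIF tuples) stay in this group; the remainder is
		 * carried into the next PDE5/PDE6/PDE7 group through
		 * protgroup_offset.
		 */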
1828
1829                 num_bde++;
1830
1831                 /* setup BDE's for data blocks associated with DIF data */
1832                 pgdone = 0;
1833                 subtotal = 0; /* total bytes processed for current prot grp */
1834                 while (!pgdone) {
1835                         /* Check to see if we ran out of space */
1836                         if (num_bde >= phba->cfg_total_seg_cnt)
1837                                 return num_bde + 1;
1838
1839                         if (!sgde) {
1840                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1841                                         "9065 BLKGRD:%s Invalid data segment\n",
1842                                                 __func__);
1843                                 return 0;
1844                         }
1845                         bpl++;
1846                         dataphysaddr = sg_dma_address(sgde) + split_offset;
1847                         bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1848                         bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1849
1850                         remainder = sg_dma_len(sgde) - split_offset;
1851
1852                         if ((subtotal + remainder) <= protgrp_bytes) {
1853                                 /* we can use this whole buffer */
1854                                 bpl->tus.f.bdeSize = remainder;
1855                                 split_offset = 0;
1856
1857                                 if ((subtotal + remainder) == protgrp_bytes)
1858                                         pgdone = 1;
1859                         } else {
1860                                 /* must split this buffer with next prot grp */
1861                                 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1862                                 split_offset += bpl->tus.f.bdeSize;
1863                         }
1864
1865                         subtotal += bpl->tus.f.bdeSize;
1866
1867                         if (datadir == DMA_TO_DEVICE)
1868                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1869                         else
1870                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1871                         bpl->tus.w = le32_to_cpu(bpl->tus.w);
1872
1873                         num_bde++;
1874                         curr_data++;
1875
1876                         if (split_offset)
1877                                 break;
1878
1879                         /* Move to the next s/g segment if possible */
1880                         sgde = sg_next(sgde);
1881
1882                 }
1883
1884                 if (protgroup_offset) {
1885                         /* update the reference tag */
1886                         reftag += protgrp_blks;
1887                         bpl++;
1888                         continue;
1889                 }
1890
1891                 /* are we done ? */
1892                 if (curr_prot == protcnt) {
1893                         alldone = 1;
1894                 } else if (curr_prot < protcnt) {
1895                         /* advance to next prot buffer */
1896                         sgpe = sg_next(sgpe);
1897                         bpl++;
1898
1899                         /* update the reference tag */
1900                         reftag += protgrp_blks;
1901                 } else {
1902                         /* if we're here, we have a bug */
1903                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1904                                         "9054 BLKGRD: bug in %s\n", __func__);
1905                 }
1906
1907         } while (!alldone);
1908 out:
1909
1910         return num_bde;
1911 }
1912
1913 /**
1914  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1915  * @phba: The Hba for which this call is being executed.
1916  * @sc: pointer to scsi command we're working on
1917  * @sgl: pointer to buffer list for protection groups
1918  * @datasegcnt: number of segments of data that have been dma mapped
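 * @lpfc_cmd: The scsi buffer which contains the SGL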
1919  *
1920  * This function sets up SGL buffer list for protection groups of
1921  * type LPFC_PG_TYPE_NO_DIF
1922  *
1923  * This is usually used when the HBA is instructed to generate
1924  * DIFs and insert them into the data stream (or strip DIFs from
1925  * the incoming data stream)
1926  *
1927  * The buffer list consists of just one protection group described
1928  * below:
1929  *                                +-------------------------+
1930  *   start of prot group  -->     |         DI_SEED         |
1931  *                                +-------------------------+
1932  *                                |         Data SGE        |
1933  *                                +-------------------------+
1934  *                                |more Data SGE's ... (opt)|
1935  *                                +-------------------------+
1936  *
1937  *
1938  * Note: Data s/g buffers have been dma mapped
1939  *
1940  * Returns the number of SGEs added to the SGL.
1941  **/
1942 static uint32_t
1943 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1944                 struct sli4_sge *sgl, int datasegcnt,
1945                 struct lpfc_io_buf *lpfc_cmd)
1946 {
1947         struct scatterlist *sgde = NULL; /* s/g data entry */
1948         struct sli4_sge_diseed *diseed = NULL;
1949         dma_addr_t physaddr;
1950         int i = 0, status;
1951         uint32_t reftag, num_sge = 0;
1952         uint8_t txop, rxop;
1953 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1954         uint32_t rc;
1955 #endif
1956         uint32_t checking = 1;
1957         uint32_t dma_len;
1958         uint32_t dma_offset = 0;
1959         struct sli4_hybrid_sgl *sgl_xtra = NULL;
1960         int j;
1961         bool lsp_just_set = false;
1962
1963         status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1964         if (status)
1965                 goto out;
1966
1967         /* extract some info from the scsi command for pde */
1968         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1969
1970 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1971         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1972         if (rc) {
1973                 if (rc & BG_ERR_SWAP)
1974                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1975                 if (rc & BG_ERR_CHECK)
1976                         checking = 0;
1977         }
1978 #endif
1979
1980         /* setup DISEED with what we have */
1981         diseed = (struct sli4_sge_diseed *) sgl;
1982         memset(diseed, 0, sizeof(struct sli4_sge_diseed));
1983         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
1984
1985         /* Endianness conversion if necessary */
1986         diseed->ref_tag = cpu_to_le32(reftag);
1987         diseed->ref_tag_tran = diseed->ref_tag;
1988
1989         /*
1990          * We only need to check the data on READs, for WRITEs
1991          * protection data is automatically generated, not checked.
1992          */
1993         if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1994                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1995                         bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
1996                 else
1997                         bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
1998
1999                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2000                         bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2001                 else
2002                         bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2003         }
2004
2005         /* setup DISEED with the rest of the info */
2006         bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2007         bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2008
2009         bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2010         bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2011
2012         /* Endianness conversion if necessary for DISEED */
2013         diseed->word2 = cpu_to_le32(diseed->word2);
2014         diseed->word3 = cpu_to_le32(diseed->word3);
2015
2016         /* advance bpl and increment sge count */
2017         num_sge++;
2018         sgl++;
2019
2020         /* assumption: caller has already run dma_map_sg on command data */
2021         sgde = scsi_sglist(sc);
2022         j = 3;
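	/*
	 * j counts SGE slots in the current SGL page; it starts at 3 for the
	 * slots already consumed ahead of the data SGEs (presumably the FCP
	 * cmd/rsp SGEs plus the DISEED just built). When the count reaches
	 * phba->border_sge_num, an LSP SGE is emitted instead of data to
	 * chain an extra per-hardware-queue SGL page
	 * (lpfc_get_sgl_per_hdwq()).
	 */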
2023         for (i = 0; i < datasegcnt; i++) {
2024                 /* clear it */
2025                 sgl->word2 = 0;
2026
2027                 /* do we need to expand the segment */
2028                 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2029                     ((datasegcnt - 1) != i)) {
2030                         /* set LSP type */
2031                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2032
2033                         sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2034
2035                         if (unlikely(!sgl_xtra)) {
2036                                 lpfc_cmd->seg_cnt = 0;
2037                                 return 0;
2038                         }
2039                         sgl->addr_lo = cpu_to_le32(putPaddrLow(
2040                                                 sgl_xtra->dma_phys_sgl));
2041                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2042                                                 sgl_xtra->dma_phys_sgl));
2043
2044                 } else {
2045                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2046                 }
2047
2048                 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2049                         if ((datasegcnt - 1) == i)
2050                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
2051                         physaddr = sg_dma_address(sgde);
2052                         dma_len = sg_dma_len(sgde);
2053                         sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2054                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2055
2056                         bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2057                         sgl->word2 = cpu_to_le32(sgl->word2);
2058                         sgl->sge_len = cpu_to_le32(dma_len);
2059
2060                         dma_offset += dma_len;
2061                         sgde = sg_next(sgde);
2062
2063                         sgl++;
2064                         num_sge++;
2065                         lsp_just_set = false;
2066
2067                 } else {
2068                         sgl->word2 = cpu_to_le32(sgl->word2);
2069                         sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2070
2071                         sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2072                         i = i - 1;  /* redo: LSP consumed the loop slot */
2073
2074                         lsp_just_set = true;
2075                 }
2076
2077                 j++;
2078
2079         }
2080
2081 out:
2082         return num_sge;
2083 }
2084
2085 /**
2086  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2087  * @phba: The Hba for which this call is being executed.
2088  * @sc: pointer to scsi command we're working on
2089  * @sgl: pointer to buffer list for protection groups
2090  * @datacnt: number of segments of data that have been dma mapped
2091  * @protcnt: number of segments of protection data that have been dma mapped
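 * @lpfc_cmd: The scsi buffer which contains the SGL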
2092  *
2093  * This function sets up SGL buffer list for protection groups of
2094  * type LPFC_PG_TYPE_DIF
2095  *
2096  * This is usually used when DIFs are in their own buffers,
2097  * separate from the data. The HBA can then be instructed
2098  * to place the DIFs in the outgoing stream.  For read operations,
2099  * the HBA can extract the DIFs and place them in DIF buffers.
2100  *
2101  * The buffer list for this type consists of one or more of the
2102  * protection groups described below:
2103  *                                    +-------------------------+
2104  *   start of first prot group  -->   |         DISEED          |
2105  *                                    +-------------------------+
2106  *                                    |      DIF (Prot SGE)     |
2107  *                                    +-------------------------+
2108  *                                    |        Data SGE         |
2109  *                                    +-------------------------+
2110  *                                    |more Data SGE's ... (opt)|
2111  *                                    +-------------------------+
2112  *   start of new  prot group  -->    |         DISEED          |
2113  *                                    +-------------------------+
2114  *                                    |          ...            |
2115  *                                    +-------------------------+
2116  *
2117  * Note: It is assumed that both data and protection s/g buffers have been
2118  *       mapped for DMA
2119  *
2120  * Returns the number of SGEs added to the SGL.
2121  **/
2122 static uint32_t
2123 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2124                 struct sli4_sge *sgl, int datacnt, int protcnt,
2125                 struct lpfc_io_buf *lpfc_cmd)
2126 {
2127         struct scatterlist *sgde = NULL; /* s/g data entry */
2128         struct scatterlist *sgpe = NULL; /* s/g prot entry */
2129         struct sli4_sge_diseed *diseed = NULL;
2130         dma_addr_t dataphysaddr, protphysaddr;
2131         unsigned short curr_data = 0, curr_prot = 0;
2132         unsigned int split_offset;
2133         unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2134         unsigned int protgrp_blks, protgrp_bytes;
2135         unsigned int remainder, subtotal;
2136         int status;
2137         unsigned char pgdone = 0, alldone = 0;
2138         unsigned blksize;
2139         uint32_t reftag;
2140         uint8_t txop, rxop;
2141         uint32_t dma_len;
2142 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2143         uint32_t rc;
2144 #endif
2145         uint32_t checking = 1;
2146         uint32_t dma_offset = 0, num_sge = 0;
2147         int j = 2;
2148         struct sli4_hybrid_sgl *sgl_xtra = NULL;
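	/*
	 * As in lpfc_bg_setup_sgl(), j counts SGE slots in the current SGL
	 * page, starting at 2 here (presumably for the two leading FCP
	 * cmd/rsp SGEs); the (j + 1/2/3) border tests below keep each
	 * DISEED/DIF/first-data triple from straddling an SGL page.
	 */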
2149
2150         sgpe = scsi_prot_sglist(sc);
2151         sgde = scsi_sglist(sc);
2152
2153         if (!sgpe || !sgde) {
2154                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2155                                 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2156                                 sgpe, sgde);
2157                 return 0;
2158         }
2159
2160         status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2161         if (status)
2162                 goto out;
2163
2164         /* extract some info from the scsi command */
2165         blksize = lpfc_cmd_blksize(sc);
2166         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2167
2168 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2169         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2170         if (rc) {
2171                 if (rc & BG_ERR_SWAP)
2172                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2173                 if (rc & BG_ERR_CHECK)
2174                         checking = 0;
2175         }
2176 #endif
2177
2178         split_offset = 0;
2179         do {
2180                 /* Check to see if we ran out of space */
2181                 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2182                     !(phba->cfg_xpsgl))
2183                         return num_sge + 3;
2184
2185                 /* DISEED and DIF have to be together */
2186                 if (!((j + 1) % phba->border_sge_num) ||
2187                     !((j + 2) % phba->border_sge_num) ||
2188                     !((j + 3) % phba->border_sge_num)) {
2189                         sgl->word2 = 0;
2190
2191                         /* set LSP type */
2192                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2193
2194                         sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2195
2196                         if (unlikely(!sgl_xtra)) {
2197                                 goto out;
2198                         } else {
2199                                 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2200                                                 sgl_xtra->dma_phys_sgl));
2201                                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2202                                                        sgl_xtra->dma_phys_sgl));
2203                         }
2204
2205                         sgl->word2 = cpu_to_le32(sgl->word2);
2206                         sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2207
2208                         sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2209                         j = 0;
2210                 }
2211
2212                 /* setup DISEED with what we have */
2213                 diseed = (struct sli4_sge_diseed *) sgl;
2214                 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2215                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2216
2217                 /* Endianness conversion if necessary */
2218                 diseed->ref_tag = cpu_to_le32(reftag);
2219                 diseed->ref_tag_tran = diseed->ref_tag;
2220
2221                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2222                         bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2223
2224                 } else {
2225                         bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2226                         /*
2227                          * When in this mode, the hardware will replace
2228                          * the guard tag from the host with a
2229                          * newly generated good CRC for the wire.
2230                          * Switch to raw mode here to avoid this
2231                          * behavior. What the host sends gets put on the wire.
2232                          */
2233                         if (txop == BG_OP_IN_CRC_OUT_CRC) {
2234                                 txop = BG_OP_RAW_MODE;
2235                                 rxop = BG_OP_RAW_MODE;
2236                         }
2237                 }
2238
2239
2240                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2241                         bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2242                 else
2243                         bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2244
2245                 /* setup DISEED with the rest of the info */
2246                 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2247                 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2248
2249                 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2250                 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2251
2252                 /* Endianness conversion if necessary for DISEED */
2253                 diseed->word2 = cpu_to_le32(diseed->word2);
2254                 diseed->word3 = cpu_to_le32(diseed->word3);
2255
2256                 /* advance sgl and increment bde count */
2257                 num_sge++;
2258
2259                 sgl++;
2260                 j++;
2261
2262                 /* setup the first BDE that points to protection buffer */
2263                 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2264                 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2265
2266                 /* must be integer multiple of the DIF block length */
2267                 BUG_ON(protgroup_len % 8);
2268
2269                 /* Now setup DIF SGE */
2270                 sgl->word2 = 0;
2271                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2272                 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2273                 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2274                 sgl->word2 = cpu_to_le32(sgl->word2);
2275                 sgl->sge_len = 0;
2276
2277                 protgrp_blks = protgroup_len / 8;
2278                 protgrp_bytes = protgrp_blks * blksize;
2279
2280                 /* check if DIF SGE is crossing the 4K boundary; if so split */
2281                 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2282                         protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2283                         protgroup_offset += protgroup_remainder;
2284                         protgrp_blks = protgroup_remainder / 8;
2285                         protgrp_bytes = protgrp_blks * blksize;
2286                 } else {
2287                         protgroup_offset = 0;
2288                         curr_prot++;
2289                 }
2290
2291                 num_sge++;
2292
2293                 /* setup SGE's for data blocks associated with DIF data */
2294                 pgdone = 0;
2295                 subtotal = 0; /* total bytes processed for current prot grp */
2296
2297                 sgl++;
2298                 j++;
2299
2300                 while (!pgdone) {
2301                         /* Check to see if we ran out of space */
2302                         if ((num_sge >= phba->cfg_total_seg_cnt) &&
2303                             !phba->cfg_xpsgl)
2304                                 return num_sge + 1;
2305
2306                         if (!sgde) {
2307                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2308                                         "9086 BLKGRD:%s Invalid data segment\n",
2309                                                 __func__);
2310                                 return 0;
2311                         }
2312
2313                         if (!((j + 1) % phba->border_sge_num)) {
2314                                 sgl->word2 = 0;
2315
2316                                 /* set LSP type */
2317                                 bf_set(lpfc_sli4_sge_type, sgl,
2318                                        LPFC_SGE_TYPE_LSP);
2319
2320                                 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2321                                                                  lpfc_cmd);
2322
2323                                 if (unlikely(!sgl_xtra)) {
2324                                         goto out;
2325                                 } else {
2326                                         sgl->addr_lo = cpu_to_le32(
2327                                           putPaddrLow(sgl_xtra->dma_phys_sgl));
2328                                         sgl->addr_hi = cpu_to_le32(
2329                                           putPaddrHigh(sgl_xtra->dma_phys_sgl));
2330                                 }
2331
2332                                 sgl->word2 = cpu_to_le32(sgl->word2);
2333                                 sgl->sge_len = cpu_to_le32(
2334                                                      phba->cfg_sg_dma_buf_size);
2335
2336                                 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2337                         } else {
2338                                 dataphysaddr = sg_dma_address(sgde) +
2339                                                                    split_offset;
2340
2341                                 remainder = sg_dma_len(sgde) - split_offset;
2342
2343                                 if ((subtotal + remainder) <= protgrp_bytes) {
2344                                         /* we can use this whole buffer */
2345                                         dma_len = remainder;
2346                                         split_offset = 0;
2347
2348                                         if ((subtotal + remainder) ==
2349                                                                   protgrp_bytes)
2350                                                 pgdone = 1;
2351                                 } else {
2352                                         /* must split this buffer with next
2353                                          * prot grp
2354                                          */
2355                                         dma_len = protgrp_bytes - subtotal;
2356                                         split_offset += dma_len;
2357                                 }
2358
2359                                 subtotal += dma_len;
2360
2361                                 sgl->word2 = 0;
2362                                 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2363                                                                  dataphysaddr));
2364                                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2365                                                                  dataphysaddr));
2366                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
2367                                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2368                                 bf_set(lpfc_sli4_sge_type, sgl,
2369                                        LPFC_SGE_TYPE_DATA);
2370
2371                                 sgl->sge_len = cpu_to_le32(dma_len);
2372                                 dma_offset += dma_len;
2373
2374                                 num_sge++;
2375                                 curr_data++;
2376
2377                                 if (split_offset) {
2378                                         sgl++;
2379                                         j++;
2380                                         break;
2381                                 }
2382
2383                                 /* Move to the next s/g segment if possible */
2384                                 sgde = sg_next(sgde);
2385
2386                                 sgl++;
2387                         }
2388
2389                         j++;
2390                 }
2391
2392                 if (protgroup_offset) {
2393                         /* update the reference tag */
2394                         reftag += protgrp_blks;
2395                         continue;
2396                 }
2397
2398                 /* are we done ? */
2399                 if (curr_prot == protcnt) {
2400                         /* mark the last SGL */
2401                         sgl--;
2402                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2403                         alldone = 1;
2404                 } else if (curr_prot < protcnt) {
2405                         /* advance to next prot buffer */
2406                         sgpe = sg_next(sgpe);
2407
2408                         /* update the reference tag */
2409                         reftag += protgrp_blks;
2410                 } else {
2411                         /* if we're here, we have a bug */
2412                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2413                                         "9085 BLKGRD: bug in %s\n", __func__);
2414                 }
2415
2416         } while (!alldone);
2417
2418 out:
2419
2420         return num_sge;
2421 }
2422
2423 /**
2424  * lpfc_prot_group_type - Get the protection group type of a SCSI command
2425  * @phba: The Hba for which this call is being executed.
2426  * @sc: pointer to scsi command we're working on
2427  *
2428  * Given a SCSI command that supports DIF, determine composition of protection
2429  * groups involved in setting up buffer lists
2430  *
2431  * Returns: Protection group type (with or without DIF)
2432  *
2433  **/
2434 static int
2435 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2436 {
2437         int ret = LPFC_PG_TYPE_INVALID;
2438         unsigned char op = scsi_get_prot_op(sc);
2439
2440         switch (op) {
2441         case SCSI_PROT_READ_STRIP:
2442         case SCSI_PROT_WRITE_INSERT:
2443                 ret = LPFC_PG_TYPE_NO_DIF;
2444                 break;
2445         case SCSI_PROT_READ_INSERT:
2446         case SCSI_PROT_WRITE_STRIP:
2447         case SCSI_PROT_READ_PASS:
2448         case SCSI_PROT_WRITE_PASS:
2449                 ret = LPFC_PG_TYPE_DIF_BUF;
2450                 break;
2451         default:
2452                 if (phba)
2453                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2454                                         "9021 Unsupported protection op:%d\n",
2455                                         op);
2456                 break;
2457         }
2458         return ret;
2459 }
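
/*
 * Put differently: for READ_STRIP and WRITE_INSERT the host never sees the
 * protection data (the HBA adds or strips it at the wire), so plain data
 * buffer lists suffice (LPFC_PG_TYPE_NO_DIF); for the remaining ops the
 * DIFs occupy their own host buffers and the buffer list must interleave
 * protection and data segments (LPFC_PG_TYPE_DIF_BUF).
 */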
2460
2461 /**
2462  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2463  * @phba: The Hba for which this call is being executed.
2464  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2465  *
2466  * Adjust the data length to account for how much data
2467  * is actually on the wire.
2468  *
2469  * returns the adjusted data length
2470  **/
2471 static int
2472 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2473                        struct lpfc_io_buf *lpfc_cmd)
2474 {
2475         struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2476         int fcpdl;
2477
2478         fcpdl = scsi_bufflen(sc);
2479
2480         /* Check if there is protection data on the wire */
2481         if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2482                 /* Read check for protection data */
2483                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2484                         return fcpdl;
2485
2486         } else {
2487                 /* Write check for protection data */
2488                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2489                         return fcpdl;
2490         }
2491
2492         /*
2493          * If we are in DIF Type 1 mode, every data block has an 8 byte
2494          * DIF (trailer) attached to it, so the FCP data length must be
2495          * adjusted to account for the protection data.
2496          */
2497         fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2498
2499         return fcpdl;
2500 }
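
/*
 * Worked example (editor's note): with SCSI_PROT_WRITE_PASS, a
 * 32768-byte transfer and a 512-byte logical block size, the wire
 * carries 32768 / 512 = 64 DIF tuples of 8 bytes each, so the
 * adjusted length is 32768 + 64 * 8 = 33280 bytes.  The same
 * arithmetic as a stand-alone sketch:
 *
 *	static int example_adjust_dl(int fcpdl, int blksize)
 *	{
 *		return fcpdl + (fcpdl / blksize) * 8;
 *	}
 */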
2501
2502 /**
2503  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2504  * @phba: The Hba for which this call is being executed.
2505  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2506  *
2507  * This is the protection/DIF aware version of
2508  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2509  * two functions eventually, but for now, it's here.
2510  * RETURNS 0 - SUCCESS,
2511  *         1 - Failed DMA map, retry.
2512  *         2 - Invalid scsi cmd or prot-type. Do not retry.
2513  **/
2514 static int
2515 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2516                 struct lpfc_io_buf *lpfc_cmd)
2517 {
2518         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2519         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2520         struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2521         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2522         uint32_t num_bde = 0;
2523         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2524         int prot_group_type = 0;
2525         int fcpdl;
2526         int ret = 1;
2527         struct lpfc_vport *vport = phba->pport;
2528
2529         /*
2530          * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2531          *  fcp_rsp regions to the first data bde entry
2532          */
2533         bpl += 2;
2534         if (scsi_sg_count(scsi_cmnd)) {
2535                 /*
2536                  * The driver stores the segment count returned from dma_map_sg
2537                  * because this is a count of dma-mappings used to map the use_sg
2538                  * pages.  They are not guaranteed to be the same for those
2539                  * architectures that implement an IOMMU.
2540                  */
2541                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2542                                         scsi_sglist(scsi_cmnd),
2543                                         scsi_sg_count(scsi_cmnd), datadir);
2544                 if (unlikely(!datasegcnt))
2545                         return 1;
2546
2547                 lpfc_cmd->seg_cnt = datasegcnt;
2548
2549                 /* First check if data segment count from SCSI Layer is good */
2550                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2551                         WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2552                         ret = 2;
2553                         goto err;
2554                 }
2555
2556                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2557
2558                 switch (prot_group_type) {
2559                 case LPFC_PG_TYPE_NO_DIF:
2560
2561                         /* Here we need to add a PDE5 and PDE6 to the count */
2562                         if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2563                                 ret = 2;
2564                                 goto err;
2565                         }
2566
2567                         num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2568                                         datasegcnt);
2569                         /* we should have 2 or more entries in buffer list */
2570                         if (num_bde < 2) {
2571                                 ret = 2;
2572                                 goto err;
2573                         }
2574                         break;
2575
2576                 case LPFC_PG_TYPE_DIF_BUF:
2577                         /*
2578                          * This type indicates that protection buffers are
2579                          * passed to the driver, so they need to be prepared
2580                          * for DMA.
2581                          */
2582                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
2583                                         scsi_prot_sglist(scsi_cmnd),
2584                                         scsi_prot_sg_count(scsi_cmnd), datadir);
2585                         if (unlikely(!protsegcnt)) {
2586                                 scsi_dma_unmap(scsi_cmnd);
2587                                 return 1;
2588                         }
2589
2590                         lpfc_cmd->prot_seg_cnt = protsegcnt;
2591
2592                         /*
2593                          * There is a minimum of 4 BPLs used for every
2594                          * protection data segment.
2595                          */
2596                         if ((lpfc_cmd->prot_seg_cnt * 4) >
2597                             (phba->cfg_total_seg_cnt - 2)) {
2598                                 ret = 2;
2599                                 goto err;
2600                         }
2601
2602                         num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2603                                         datasegcnt, protsegcnt);
2604                         /* we should have 3 or more entries in buffer list */
2605                         if ((num_bde < 3) ||
2606                             (num_bde > phba->cfg_total_seg_cnt)) {
2607                                 ret = 2;
2608                                 goto err;
2609                         }
2610                         break;
2611
2612                 case LPFC_PG_TYPE_INVALID:
2613                 default:
2614                         scsi_dma_unmap(scsi_cmnd);
2615                         lpfc_cmd->seg_cnt = 0;
2616
2617                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2618                                         "9022 Unexpected protection group %i\n",
2619                                         prot_group_type);
2620                         return 2;
2621                 }
2622         }
2623
2624         /*
2625          * Finish initializing those IOCB fields that are dependent on the
2626          * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2627          * reinitialized since all iocb memory resources are used many times
2628          * for transmit, receive, and continuation bpl's.
2629          */
2630         iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2631         iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2632         iocb_cmd->ulpBdeCount = 1;
2633         iocb_cmd->ulpLe = 1;
2634
2635         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2636         fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
2637
2638         /*
2639          * Due to difference in data length between DIF/non-DIF paths,
2640          * we need to set word 4 of IOCB here
2641          */
2642         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2643
2644         /*
2645          * For First burst, we may need to adjust the initial transfer
2646          * length for DIF
2647          */
2648         if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2649             (fcpdl < vport->cfg_first_burst_size))
2650                 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2651
2652         return 0;
2653 err:
2654         if (lpfc_cmd->seg_cnt)
2655                 scsi_dma_unmap(scsi_cmnd);
2656         if (lpfc_cmd->prot_seg_cnt)
2657                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2658                              scsi_prot_sg_count(scsi_cmnd),
2659                              scsi_cmnd->sc_data_direction);
2660
2661         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2662                         "9023 Cannot setup S/G List for HBA "
2663                         "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2664                         lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2665                         phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2666                         prot_group_type, num_bde);
2667
2668         lpfc_cmd->seg_cnt = 0;
2669         lpfc_cmd->prot_seg_cnt = 0;
2670         return ret;
2671 }
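
/*
 * Editor's sketch (hypothetical caller, not driver code): acting on
 * the three return codes documented above.
 *
 *	switch (lpfc_bg_scsi_prep_dma_buf_s3(phba, lpfc_cmd)) {
 *	case 0:		// mapped successfully - issue the command
 *		break;
 *	case 1:		// DMA mapping failed - transient, worth a retry
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	default:	// 2: bad cmd or prot-type - fail without retry
 *		cmnd->result = DID_ERROR << 16;
 *		break;
 *	}
 */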
2672
2673 /*
2674  * This function calculates the T10 DIF guard tag
2675  * on the specified data using the CRC algorithm
2676  * implemented by crc_t10dif().
2677  */
2678 static uint16_t
2679 lpfc_bg_crc(uint8_t *data, int count)
2680 {
2681         uint16_t crc = 0;
2682         uint16_t x;
2683
2684         crc = crc_t10dif(data, count);
2685         x = cpu_to_be16(crc);
2686         return x;
2687 }
2688
2689 /*
2690  * This function calculates the T10 DIF guard tag
2691  * on the specified data using the IP checksum algorithm
2692  * implemented by ip_compute_csum().
2693  */
2694 static uint16_t
2695 lpfc_bg_csum(uint8_t *data, int count)
2696 {
2697         uint16_t ret;
2698
2699         ret = ip_compute_csum(data, count);
2700         return ret;
2701 }
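
/*
 * Editor's note: which of the two helpers applies depends on the
 * guard format negotiated by the block layer -- crc_t10dif() for the
 * T10 CRC guard, ip_compute_csum() for the IP-checksum guard.  The
 * recompute-and-compare pattern used below in lpfc_calc_bg_err():
 *
 *	sum = lpfc_cmd_guard_csum(cmd) ?
 *			lpfc_bg_csum(data_src, blksize) :
 *			lpfc_bg_crc(data_src, blksize);
 *	if (sum != src->guard_tag)
 *		// guard tag error on this block
 */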
2702
2703 /*
2704  * This function examines the protection data to try to determine
2705  * what type of T10-DIF error occurred.
2706  */
2707 static void
2708 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2709 {
2710         struct scatterlist *sgpe; /* s/g prot entry */
2711         struct scatterlist *sgde; /* s/g data entry */
2712         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2713         struct scsi_dif_tuple *src = NULL;
2714         uint8_t *data_src = NULL;
2715         uint16_t guard_tag;
2716         uint16_t start_app_tag, app_tag;
2717         uint32_t start_ref_tag, ref_tag;
2718         int prot, protsegcnt;
2719         int err_type, len, data_len;
2720         int chk_ref, chk_app, chk_guard;
2721         uint16_t sum;
2722         unsigned blksize;
2723
2724         err_type = BGS_GUARD_ERR_MASK;
2725         sum = 0;
2726         guard_tag = 0;
2727
2728         /* First check to see if there is protection data to examine */
2729         prot = scsi_get_prot_op(cmd);
2730         if ((prot == SCSI_PROT_READ_STRIP) ||
2731             (prot == SCSI_PROT_WRITE_INSERT) ||
2732             (prot == SCSI_PROT_NORMAL))
2733                 goto out;
2734
2735         /* Currently the driver just supports ref_tag and guard_tag checking */
2736         chk_ref = 1;
2737         chk_app = 0;
2738         chk_guard = 0;
2739
2740         /* Setup a ptr to the protection data provided by the SCSI host */
2741         sgpe = scsi_prot_sglist(cmd);
2742         protsegcnt = lpfc_cmd->prot_seg_cnt;
2743
2744         if (sgpe && protsegcnt) {
2745
2746                 /*
2747                  * We will only try to verify guard tag if the segment
2748                  * data length is a multiple of the blksize.
2749                  */
2750                 sgde = scsi_sglist(cmd);
2751                 blksize = lpfc_cmd_blksize(cmd);
2752                 data_src = (uint8_t *)sg_virt(sgde);
2753                 data_len = sgde->length;
2754                 if ((data_len & (blksize - 1)) == 0)
2755                         chk_guard = 1;
2756
2757                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2758                 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
2759                 start_app_tag = src->app_tag;
2760                 len = sgpe->length;
2761                 while (src && protsegcnt) {
2762                         while (len) {
2763
2764                                 /*
2765                                  * First check to see if a protection data
2766                                  * check is valid
2767                                  */
2768                                 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2769                                     (src->app_tag == T10_PI_APP_ESCAPE)) {
2770                                         start_ref_tag++;
2771                                         goto skipit;
2772                                 }
2773
2774                                 /* First Guard Tag checking */
2775                                 if (chk_guard) {
2776                                         guard_tag = src->guard_tag;
2777                                         if (lpfc_cmd_guard_csum(cmd))
2778                                                 sum = lpfc_bg_csum(data_src,
2779                                                                    blksize);
2780                                         else
2781                                                 sum = lpfc_bg_crc(data_src,
2782                                                                   blksize);
2783                                         if ((guard_tag != sum)) {
2784                                                 err_type = BGS_GUARD_ERR_MASK;
2785                                                 goto out;
2786                                         }
2787                                 }
2788
2789                                 /* Reference Tag checking */
2790                                 ref_tag = be32_to_cpu(src->ref_tag);
2791                                 if (chk_ref && (ref_tag != start_ref_tag)) {
2792                                         err_type = BGS_REFTAG_ERR_MASK;
2793                                         goto out;
2794                                 }
2795                                 start_ref_tag++;
2796
2797                                 /* App Tag checking */
2798                                 app_tag = src->app_tag;
2799                                 if (chk_app && (app_tag != start_app_tag)) {
2800                                         err_type = BGS_APPTAG_ERR_MASK;
2801                                         goto out;
2802                                 }
2803 skipit:
2804                                 len -= sizeof(struct scsi_dif_tuple);
2805                                 if (len < 0)
2806                                         len = 0;
2807                                 src++;
2808
2809                                 data_src += blksize;
2810                                 data_len -= blksize;
2811
2812                                 /*
2813                                  * Are we at the end of the Data segment?
2814                                  * The data segment is only used for Guard
2815                                  * tag checking.
2816                                  */
2817                                 if (chk_guard && (data_len == 0)) {
2818                                         chk_guard = 0;
2819                                         sgde = sg_next(sgde);
2820                                         if (!sgde)
2821                                                 goto out;
2822
2823                                         data_src = (uint8_t *)sg_virt(sgde);
2824                                         data_len = sgde->length;
2825                                         if ((data_len & (blksize - 1)) == 0)
2826                                                 chk_guard = 1;
2827                                 }
2828                         }
2829
2830                         /* Go to the next protection data segment */
2831                         sgpe = sg_next(sgpe);
2832                         if (sgpe) {
2833                                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2834                                 len = sgpe->length;
2835                         } else {
2836                                 src = NULL;
2837                         }
2838                         protsegcnt--;
2839                 }
2840         }
2841 out:
2842         if (err_type == BGS_GUARD_ERR_MASK) {
2843                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2844                                         0x10, 0x1);
2845                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2846                               SAM_STAT_CHECK_CONDITION;
2847                 phba->bg_guard_err_cnt++;
2848                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2849                                 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
2850                                 (unsigned long)scsi_get_lba(cmd),
2851                                 sum, guard_tag);
2852
2853         } else if (err_type == BGS_REFTAG_ERR_MASK) {
2854                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2855                                         0x10, 0x3);
2856                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2857                               SAM_STAT_CHECK_CONDITION;
2858
2859                 phba->bg_reftag_err_cnt++;
2860                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2861                                 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
2862                                 (unsigned long)scsi_get_lba(cmd),
2863                                 ref_tag, start_ref_tag);
2864
2865         } else if (err_type == BGS_APPTAG_ERR_MASK) {
2866                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2867                                         0x10, 0x2);
2868                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2869                               SAM_STAT_CHECK_CONDITION;
2870
2871                 phba->bg_apptag_err_cnt++;
2872                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2873                                 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
2874                                 (unsigned long)scsi_get_lba(cmd),
2875                                 app_tag, start_app_tag);
2876         }
2877 }
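
/*
 * Editor's note: the walk above advances one 8-byte tuple per logical
 * block (see struct scsi_dif_tuple near the top of this file):
 *
 *	bytes 0-1  guard_tag - CRC or IP checksum of the data block
 *	bytes 2-3  app_tag   - opaque; T10_PI_APP_ESCAPE disables checks
 *	bytes 4-7  ref_tag   - expected to increment, block by block,
 *	                       from the low 32 bits of the starting LBA
 */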
2878
2879
2880 /*
2881  * This function checks for BlockGuard errors detected by
2882  * the HBA.  In case of errors, the ASC/ASCQ fields in the
2883  * sense buffer will be set accordingly, paired with
2884  * ILLEGAL_REQUEST to signal to the kernel that the HBA
2885  * detected corruption.
2886  *
2887  * Returns:
2888  *  0 - No error found
2889  *  1 - BlockGuard error found
2890  * -1 - Internal error (bad profile, ...etc)
2891  */
2892 static int
2893 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2894                   struct lpfc_iocbq *pIocbOut)
2895 {
2896         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2897         struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2898         int ret = 0;
2899         uint32_t bghm = bgf->bghm;
2900         uint32_t bgstat = bgf->bgstat;
2901         uint64_t failing_sector = 0;
2902
2903         if (lpfc_bgs_get_invalid_prof(bgstat)) {
2904                 cmd->result = DID_ERROR << 16;
2905                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2906                                 "9072 BLKGRD: Invalid BG Profile in cmd"
2907                                 " 0x%x lba 0x%llx blk cnt 0x%x "
2908                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2909                                 (unsigned long long)scsi_get_lba(cmd),
2910                                 blk_rq_sectors(cmd->request), bgstat, bghm);
2911                 ret = (-1);
2912                 goto out;
2913         }
2914
2915         if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2916                 cmd->result = DID_ERROR << 16;
2917                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2918                                 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2919                                 " 0x%x lba 0x%llx blk cnt 0x%x "
2920                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2921                                 (unsigned long long)scsi_get_lba(cmd),
2922                                 blk_rq_sectors(cmd->request), bgstat, bghm);
2923                 ret = (-1);
2924                 goto out;
2925         }
2926
2927         if (lpfc_bgs_get_guard_err(bgstat)) {
2928                 ret = 1;
2929
2930                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2931                                 0x10, 0x1);
2932                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2933                               SAM_STAT_CHECK_CONDITION;
2934                 phba->bg_guard_err_cnt++;
2935                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2936                                 "9055 BLKGRD: Guard Tag error in cmd"
2937                                 " 0x%x lba 0x%llx blk cnt 0x%x "
2938                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2939                                 (unsigned long long)scsi_get_lba(cmd),
2940                                 blk_rq_sectors(cmd->request), bgstat, bghm);
2941         }
2942
2943         if (lpfc_bgs_get_reftag_err(bgstat)) {
2944                 ret = 1;
2945
2946                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2947                                 0x10, 0x3);
2948                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2949                               SAM_STAT_CHECK_CONDITION;
2950
2951                 phba->bg_reftag_err_cnt++;
2952                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2953                                 "9056 BLKGRD: Ref Tag error in cmd"
2954                                 " 0x%x lba 0x%llx blk cnt 0x%x "
2955                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2956                                 (unsigned long long)scsi_get_lba(cmd),
2957                                 blk_rq_sectors(cmd->request), bgstat, bghm);
2958         }
2959
2960         if (lpfc_bgs_get_apptag_err(bgstat)) {
2961                 ret = 1;
2962
2963                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2964                                 0x10, 0x2);
2965                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2966                               SAM_STAT_CHECK_CONDITION;
2967
2968                 phba->bg_apptag_err_cnt++;
2969                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2970                                 "9061 BLKGRD: App Tag error in cmd"
2971                                 " 0x%x lba 0x%llx blk cnt 0x%x "
2972                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2973                                 (unsigned long long)scsi_get_lba(cmd),
2974                                 blk_rq_sectors(cmd->request), bgstat, bghm);
2975         }
2976
2977         if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2978                 /*
2979                  * setup sense data descriptor 0 per SPC-4 as an information
2980                  * field, and put the failing LBA in it.
2981                  * This code assumes there was also a guard/app/ref tag error
2982                  * indication.
2983                  */
2984                 cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
2985                 cmd->sense_buffer[8] = 0;     /* Information descriptor type */
2986                 cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
2987                 cmd->sense_buffer[10] = 0x80; /* Validity bit */
2988
2989                 /* bghm is an "on the wire" FC frame based count */
2990                 switch (scsi_get_prot_op(cmd)) {
2991                 case SCSI_PROT_READ_INSERT:
2992                 case SCSI_PROT_WRITE_STRIP:
2993                         bghm /= cmd->device->sector_size;
2994                         break;
2995                 case SCSI_PROT_READ_STRIP:
2996                 case SCSI_PROT_WRITE_INSERT:
2997                 case SCSI_PROT_READ_PASS:
2998                 case SCSI_PROT_WRITE_PASS:
2999                         bghm /= (cmd->device->sector_size +
3000                                 sizeof(struct scsi_dif_tuple));
3001                         break;
3002                 }
3003
3004                 failing_sector = scsi_get_lba(cmd);
3005                 failing_sector += bghm;
3006
3007                 /* Descriptor Information */
3008                 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3009         }
3010
3011         if (!ret) {
3012                 /* No error was reported - problem in FW? */
3013                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3014                                 "9057 BLKGRD: Unknown error in cmd"
3015                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3016                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3017                                 (unsigned long long)scsi_get_lba(cmd),
3018                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3019
3020                 /* Calculate what type of error it was */
3021                 lpfc_calc_bg_err(phba, lpfc_cmd);
3022         }
3023 out:
3024         return ret;
3025 }
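
/*
 * Worked example (editor's note): for SCSI_PROT_READ_PASS on a
 * 512-byte-sector device, each block occupies 512 + 8 = 520 bytes on
 * the wire, so a high water mark of bghm = 5200 wire bytes converts
 * to 5200 / 520 = 10 blocks, and the failing sector reported in the
 * sense descriptor is scsi_get_lba(cmd) + 10.  For READ_INSERT or
 * WRITE_STRIP the wire carries no DIF, so the divisor is just the
 * 512-byte sector size.
 */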
3026
3027 /**
3028  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3029  * @phba: The Hba for which this call is being executed.
3030  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3031  *
3032  * This routine does the PCI DMA mapping for the scatter-gather list of the
3033  * scsi_cmnd field of @lpfc_cmd for a device with the SLI-4 interface spec.
3034  *
3035  * Return codes:
3036  *      2 - Error - Do not retry
3037  *      1 - Error - Retry
3038  *      0 - Success
3039  **/
3040 static int
3041 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3042 {
3043         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3044         struct scatterlist *sgel = NULL;
3045         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3046         struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3047         struct sli4_sge *first_data_sgl;
3048         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3049         dma_addr_t physaddr;
3050         uint32_t num_bde = 0;
3051         uint32_t dma_len;
3052         uint32_t dma_offset = 0;
3053         int nseg, i, j;
3054         struct ulp_bde64 *bde;
3055         bool lsp_just_set = false;
3056         struct sli4_hybrid_sgl *sgl_xtra = NULL;
3057
3058         /*
3059          * There are three possibilities here - use scatter-gather segment, use
3060          * the single mapping, or neither.  Start the lpfc command prep by
3061          * bumping the sgl beyond the fcp_cmnd and fcp_rsp regions to the first
3062          * data sge entry.
3063          */
3064         if (scsi_sg_count(scsi_cmnd)) {
3065                 /*
3066                  * The driver stores the segment count returned from dma_map_sg
3067                  * because this is a count of dma-mappings used to map the use_sg
3068                  * pages.  They are not guaranteed to be the same for those
3069                  * architectures that implement an IOMMU.
3070                  */
3071
3072                 nseg = scsi_dma_map(scsi_cmnd);
3073                 if (unlikely(nseg <= 0))
3074                         return 1;
3075                 sgl += 1;
3076                 /* clear the last flag in the fcp_rsp map entry */
3077                 sgl->word2 = le32_to_cpu(sgl->word2);
3078                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3079                 sgl->word2 = cpu_to_le32(sgl->word2);
3080                 sgl += 1;
3081                 first_data_sgl = sgl;
3082                 lpfc_cmd->seg_cnt = nseg;
3083                 if (!phba->cfg_xpsgl &&
3084                     lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3085                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3086                                         "9074 BLKGRD:"
3087                                         " %s: Too many sg segments from "
3088                                         "dma_map_sg.  Config %d, seg_cnt %d\n",
3089                                         __func__, phba->cfg_sg_seg_cnt,
3090                                         lpfc_cmd->seg_cnt);
3091                         WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3092                         lpfc_cmd->seg_cnt = 0;
3093                         scsi_dma_unmap(scsi_cmnd);
3094                         return 2;
3095                 }
3096
3097                 /*
3098                  * The driver established a maximum scatter-gather segment count
3099                  * during probe that limits the number of sg elements in any
3100                  * single scsi command.  Just run through the seg_cnt and format
3101                  * the sge's.
3102                  * When using SLI-3 the driver will try to fit all the BDEs into
3103                  * the IOCB. If it can't then the BDEs get added to a BPL as it
3104                  * does for SLI-2 mode.
3105                  */
3106
3107                 /* for tracking segment boundaries */
3108                 sgel = scsi_sglist(scsi_cmnd);
3109                 j = 2;
3110                 for (i = 0; i < nseg; i++) {
3111                         sgl->word2 = 0;
3112                         if ((num_bde + 1) == nseg) {
3113                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
3114                                 bf_set(lpfc_sli4_sge_type, sgl,
3115                                        LPFC_SGE_TYPE_DATA);
3116                         } else {
3117                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3118
3119                                 /* do we need to expand the segment */
3120                                 if (!lsp_just_set &&
3121                                     !((j + 1) % phba->border_sge_num) &&
3122                                     ((nseg - 1) != i)) {
3123                                         /* set LSP type */
3124                                         bf_set(lpfc_sli4_sge_type, sgl,
3125                                                LPFC_SGE_TYPE_LSP);
3126
3127                                         sgl_xtra = lpfc_get_sgl_per_hdwq(
3128                                                         phba, lpfc_cmd);
3129
3130                                         if (unlikely(!sgl_xtra)) {
3131                                                 lpfc_cmd->seg_cnt = 0;
3132                                                 scsi_dma_unmap(scsi_cmnd);
3133                                                 return 1;
3134                                         }
3135                                         sgl->addr_lo = cpu_to_le32(putPaddrLow(
3136                                                        sgl_xtra->dma_phys_sgl));
3137                                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3138                                                        sgl_xtra->dma_phys_sgl));
3139
3140                                 } else {
3141                                         bf_set(lpfc_sli4_sge_type, sgl,
3142                                                LPFC_SGE_TYPE_DATA);
3143                                 }
3144                         }
3145
3146                         if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3147                                      LPFC_SGE_TYPE_LSP)) {
3148                                 if ((nseg - 1) == i)
3149                                         bf_set(lpfc_sli4_sge_last, sgl, 1);
3150
3151                                 physaddr = sg_dma_address(sgel);
3152                                 dma_len = sg_dma_len(sgel);
3153                                 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3154                                                            physaddr));
3155                                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3156                                                            physaddr));
3157
3158                                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3159                                 sgl->word2 = cpu_to_le32(sgl->word2);
3160                                 sgl->sge_len = cpu_to_le32(dma_len);
3161
3162                                 dma_offset += dma_len;
3163                                 sgel = sg_next(sgel);
3164
3165                                 sgl++;
3166                                 lsp_just_set = false;
3167
3168                         } else {
3169                                 sgl->word2 = cpu_to_le32(sgl->word2);
3170                                 sgl->sge_len = cpu_to_le32(
3171                                                      phba->cfg_sg_dma_buf_size);
3172
3173                                 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3174                                 i = i - 1;
3175
3176                                 lsp_just_set = true;
3177                         }
3178
3179                         j++;
3180                 }
3181                 /*
3182                  * Setup the first Payload BDE. For FCoE we just key off
3183                  * Performance Hints, for FC we use lpfc_enable_pbde.
3184                  * We populate words 13-15 of IOCB/WQE.
3185                  */
3186                 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3187                     phba->cfg_enable_pbde) {
3188                         bde = (struct ulp_bde64 *)
3189                                 &(iocb_cmd->unsli3.sli3Words[5]);
3190                         bde->addrLow = first_data_sgl->addr_lo;
3191                         bde->addrHigh = first_data_sgl->addr_hi;
3192                         bde->tus.f.bdeSize =
3193                                         le32_to_cpu(first_data_sgl->sge_len);
3194                         bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3195                         bde->tus.w = cpu_to_le32(bde->tus.w);
3196                 }
3197         } else {
3198                 sgl += 1;
3199                 /* clear the last flag in the fcp_rsp map entry */
3200                 sgl->word2 = le32_to_cpu(sgl->word2);
3201                 bf_set(lpfc_sli4_sge_last, sgl, 1);
3202                 sgl->word2 = cpu_to_le32(sgl->word2);
3203
3204                 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3205                     phba->cfg_enable_pbde) {
3206                         bde = (struct ulp_bde64 *)
3207                                 &(iocb_cmd->unsli3.sli3Words[5]);
3208                         memset(bde, 0, (sizeof(uint32_t) * 3));
3209                 }
3210         }
3211
3212         /*
3213          * Finish initializing those IOCB fields that are dependent on the
3214          * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3215          * explicitly reinitialized since all iocb memory resources are
3216          * reused.
3217          */
3218         fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3219
3220         /*
3221          * Due to difference in data length between DIF/non-DIF paths,
3222          * we need to set word 4 of IOCB here
3223          */
3224         iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3225
3226         /*
3227          * If the OAS driver feature is enabled and the lun is enabled for
3228          * OAS, set the oas iocb related flags.
3229          */
3230         if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3231                 scsi_cmnd->device->hostdata)->oas_enabled) {
3232                 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3233                 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3234                         scsi_cmnd->device->hostdata)->priority;
3235         }
3236
3237         return 0;
3238 }
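
/*
 * Editor's sketch: the shape of one data SGE write-out in the loop
 * above (not new driver code; 'last' is a hypothetical flag standing
 * in for the end-of-list tests in the loop):
 *
 *	physaddr = sg_dma_address(sgel);
 *	dma_len  = sg_dma_len(sgel);
 *	sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
 *	sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
 *	bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
 *	bf_set(lpfc_sli4_sge_last, sgl, last ? 1 : 0);
 *	sgl->word2 = cpu_to_le32(sgl->word2);	// commit bit fields LE
 *	sgl->sge_len = cpu_to_le32(dma_len);
 */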
3239
3240 /**
3241  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3242  * @phba: The Hba for which this call is being executed.
3243  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3244  *
3245  * This is the protection/DIF aware version of
3246  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3247  * two functions eventually, but for now, it's here.
3248  * Return codes:
3249  *      2 - Error - Do not retry
3250  *      1 - Error - Retry
3251  *      0 - Success
3252  **/
3253 static int
3254 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3255                 struct lpfc_io_buf *lpfc_cmd)
3256 {
3257         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3258         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3259         struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3260         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3261         uint32_t num_sge = 0;
3262         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3263         int prot_group_type = 0;
3264         int fcpdl;
3265         int ret = 1;
3266         struct lpfc_vport *vport = phba->pport;
3267
3268         /*
3269          * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3270          *  fcp_rsp regions to the first data sge entry
3271          */
3272         if (scsi_sg_count(scsi_cmnd)) {
3273                 /*
3274                  * The driver stores the segment count returned from dma_map_sg
3275                  * because this is a count of dma-mappings used to map the use_sg
3276                  * pages.  They are not guaranteed to be the same for those
3277                  * architectures that implement an IOMMU.
3278                  */
3279                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3280                                         scsi_sglist(scsi_cmnd),
3281                                         scsi_sg_count(scsi_cmnd), datadir);
3282                 if (unlikely(!datasegcnt))
3283                         return 1;
3284
3285                 sgl += 1;
3286                 /* clear the last flag in the fcp_rsp map entry */
3287                 sgl->word2 = le32_to_cpu(sgl->word2);
3288                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3289                 sgl->word2 = cpu_to_le32(sgl->word2);
3290
3291                 sgl += 1;
3292                 lpfc_cmd->seg_cnt = datasegcnt;
3293
3294                 /* First check if data segment count from SCSI Layer is good */
3295                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3296                     !phba->cfg_xpsgl) {
3297                         WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3298                         ret = 2;
3299                         goto err;
3300                 }
3301
3302                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3303
3304                 switch (prot_group_type) {
3305                 case LPFC_PG_TYPE_NO_DIF:
3306                         /* Here we need to add a DISEED to the count */
3307                         if (((lpfc_cmd->seg_cnt + 1) >
3308                                         phba->cfg_total_seg_cnt) &&
3309                             !phba->cfg_xpsgl) {
3310                                 ret = 2;
3311                                 goto err;
3312                         }
3313
3314                         num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3315                                         datasegcnt, lpfc_cmd);
3316
3317                         /* we should have 2 or more entries in buffer list */
3318                         if (num_sge < 2) {
3319                                 ret = 2;
3320                                 goto err;
3321                         }
3322                         break;
3323
3324                 case LPFC_PG_TYPE_DIF_BUF:
3325                         /*
3326                          * This type indicates that protection buffers are
3327                          * passed to the driver, so they need to be prepared
3328                          * for DMA.
3329                          */
3330                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
3331                                         scsi_prot_sglist(scsi_cmnd),
3332                                         scsi_prot_sg_count(scsi_cmnd), datadir);
3333                         if (unlikely(!protsegcnt)) {
3334                                 scsi_dma_unmap(scsi_cmnd);
3335                                 return 1;
3336                         }
3337
3338                         lpfc_cmd->prot_seg_cnt = protsegcnt;
3339                         /*
3340                          * There is a minimum of 3 SGEs used for every
3341                          * protection data segment.
3342                          */
3343                         if (((lpfc_cmd->prot_seg_cnt * 3) >
3344                                         (phba->cfg_total_seg_cnt - 2)) &&
3345                             !phba->cfg_xpsgl) {
3346                                 ret = 2;
3347                                 goto err;
3348                         }
3349
3350                         num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3351                                         datasegcnt, protsegcnt, lpfc_cmd);
3352
3353                         /* we should have 3 or more entries in buffer list */
3354                         if (num_sge < 3 ||
3355                             (num_sge > phba->cfg_total_seg_cnt &&
3356                              !phba->cfg_xpsgl)) {
3357                                 ret = 2;
3358                                 goto err;
3359                         }
3360                         break;
3361
3362                 case LPFC_PG_TYPE_INVALID:
3363                 default:
3364                         scsi_dma_unmap(scsi_cmnd);
3365                         lpfc_cmd->seg_cnt = 0;
3366
3367                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3368                                         "9083 Unexpected protection group %i\n",
3369                                         prot_group_type);
3370                         return 2;
3371                 }
3372         }
3373
3374         switch (scsi_get_prot_op(scsi_cmnd)) {
3375         case SCSI_PROT_WRITE_STRIP:
3376         case SCSI_PROT_READ_STRIP:
3377                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3378                 break;
3379         case SCSI_PROT_WRITE_INSERT:
3380         case SCSI_PROT_READ_INSERT:
3381                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3382                 break;
3383         case SCSI_PROT_WRITE_PASS:
3384         case SCSI_PROT_READ_PASS:
3385                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3386                 break;
3387         }
3388
3389         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3390         fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
3391
3392         /*
3393          * Due to difference in data length between DIF/non-DIF paths,
3394          * we need to set word 4 of IOCB here
3395          */
3396         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3397
3398         /*
3399          * For First burst, we may need to adjust the initial transfer
3400          * length for DIF
3401          */
3402         if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3403             (fcpdl < vport->cfg_first_burst_size))
3404                 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3405
3406         /*
3407          * If the OAS driver feature is enabled and the lun is enabled for
3408          * OAS, set the oas iocb related flags.
3409          */
3410         if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3411                 scsi_cmnd->device->hostdata)->oas_enabled)
3412                 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3413
3414         return 0;
3415 err:
3416         if (lpfc_cmd->seg_cnt)
3417                 scsi_dma_unmap(scsi_cmnd);
3418         if (lpfc_cmd->prot_seg_cnt)
3419                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3420                              scsi_prot_sg_count(scsi_cmnd),
3421                              scsi_cmnd->sc_data_direction);
3422
3423         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3424                         "9084 Cannot setup S/G List for HBA "
3425                         "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3426                         lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3427                         phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3428                         prot_group_type, num_sge);
3429
3430         lpfc_cmd->seg_cnt = 0;
3431         lpfc_cmd->prot_seg_cnt = 0;
3432         return ret;
3433 }
3434
3435 /**
3436  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3437  * @phba: The Hba for which this call is being executed.
3438  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3439  *
3440  * This routine wraps the actual DMA mapping function pointer from the
3441  * lpfc_hba struct.
3442  *
3443  * Return codes:
3444  *      nonzero - Error (1 - retry, 2 - do not retry)
3445  *      0 - Success
3446  **/
3447 static inline int
3448 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3449 {
3450         return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3451 }
3452
3453 /**
3454  * lpfc_bg_scsi_prep_dma_buf - Wrapper for BlockGuard DMA mapping of scsi buffer
3455  *
3456  * @phba: The Hba for which this call is being executed.
3457  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3458  *
3459  * This routine wraps the actual DMA mapping function pointer from the
3460  * lpfc_hba struct.
3461  *
3462  * Return codes:
3463  *      nonzero - Error (1 - retry, 2 - do not retry)
3464  *      0 - Success
3465  **/
3466 static inline int
3467 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3468 {
3469         return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3470 }
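
/*
 * Editor's note: both wrappers dispatch through function pointers in
 * struct lpfc_hba that the driver binds once, per SLI revision, at
 * probe time -- along these lines (hedged sketch; see the driver's
 * API table setup for the authoritative assignments):
 *
 *	phba->lpfc_scsi_prep_dma_buf    = lpfc_scsi_prep_dma_buf_s3;
 *	phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
 *	// ...or the _s4 variants on SLI-4 hardware
 */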
3471
3472 /**
3473  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3474  * @phba: Pointer to hba context object.
3475  * @vport: Pointer to vport object.
3476  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3477  * @rsp_iocb: Pointer to response iocb object which reported error.
3478  *
3479  * This function posts an event when there is a SCSI command reporting
3480  * error from the scsi device.
3481  **/
3482 static void
3483 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3484                 struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
3485         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3486         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3487         uint32_t resp_info = fcprsp->rspStatus2;
3488         uint32_t scsi_status = fcprsp->rspStatus3;
3489         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3490         struct lpfc_fast_path_event *fast_path_evt = NULL;
3491         struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3492         unsigned long flags;
3493
3494         if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3495                 return;
3496
3497         /* If there is a queue full or busy condition, send a scsi event */
3498         if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3499                 (cmnd->result == SAM_STAT_BUSY)) {
3500                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3501                 if (!fast_path_evt)
3502                         return;
3503                 fast_path_evt->un.scsi_evt.event_type =
3504                         FC_REG_SCSI_EVENT;
3505                 fast_path_evt->un.scsi_evt.subcategory =
3506                 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3507                 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3508                 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3509                 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3510                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3511                 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3512                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3513         } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3514                 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3515                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3516                 if (!fast_path_evt)
3517                         return;
3518                 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3519                         FC_REG_SCSI_EVENT;
3520                 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3521                         LPFC_EVENT_CHECK_COND;
3522                 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3523                         cmnd->device->lun;
3524                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3525                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3526                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3527                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3528                 fast_path_evt->un.check_cond_evt.sense_key =
3529                         cmnd->sense_buffer[2] & 0xf;
3530                 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3531                 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3532         } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3533                      fcpi_parm &&
3534                      ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3535                         ((scsi_status == SAM_STAT_GOOD) &&
3536                         !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3537                 /*
3538                  * If the status is good or the resid does not match fcpi_parm,
3539                  * and fcpi_parm is valid, then there is a read check error.
3540                  */
3541                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3542                 if (!fast_path_evt)
3543                         return;
3544                 fast_path_evt->un.read_check_error.header.event_type =
3545                         FC_REG_FABRIC_EVENT;
3546                 fast_path_evt->un.read_check_error.header.subcategory =
3547                         LPFC_EVENT_FCPRDCHKERR;
3548                 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3549                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3550                 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3551                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3552                 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3553                 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3554                 fast_path_evt->un.read_check_error.fcpiparam =
3555                         fcpi_parm;
3556         } else
3557                 return;
3558
3559         fast_path_evt->vport = vport;
3560         spin_lock_irqsave(&phba->hbalock, flags);
3561         list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3562         spin_unlock_irqrestore(&phba->hbalock, flags);
3563         lpfc_worker_wake_up(phba);
3564         return;
3565 }
3566
3567 /**
3568  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of the SG-list for a device
3569  * @phba: The HBA for which this call is being executed.
3570  * @psb: The scsi buffer which is going to be un-mapped.
3571  *
3572  * This routine does DMA un-mapping of the scatter-gather list of the scsi
3573  * command held in @psb.
3574  **/
3575 static void
3576 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3577 {
3578         /*
3579          * There are only two special cases to consider.  (1) the scsi command
3580          * requested scatter-gather usage or (2) the scsi command allocated
3581          * a request buffer, but did not request use_sg.  There is a third
3582          * case, but it does not require resource deallocation.
3583          */
3584         if (psb->seg_cnt > 0)
3585                 scsi_dma_unmap(psb->pCmd);
3586         if (psb->prot_seg_cnt > 0)
3587                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3588                                 scsi_prot_sg_count(psb->pCmd),
3589                                 psb->pCmd->sc_data_direction);
3590 }
3591
3592 /**
3593  * lpfc_handle_fcp_err - FCP response handler
3594  * @vport: The virtual port for which this call is being executed.
3595  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3596  * @rsp_iocb: The response IOCB which contains FCP error.
3597  *
3598  * This routine is called to process response IOCB with status field
3599  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3600  * based upon SCSI and FCP error.
3601  **/
3602 static void
3603 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3604                     struct lpfc_iocbq *rsp_iocb)
3605 {
3606         struct lpfc_hba *phba = vport->phba;
3607         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3608         struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3609         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3610         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3611         uint32_t resp_info = fcprsp->rspStatus2;
3612         uint32_t scsi_status = fcprsp->rspStatus3;
3613         uint32_t *lp;
3614         uint32_t host_status = DID_OK;
3615         uint32_t rsplen = 0;
3616         uint32_t fcpDl;
3617         uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3618
3619
3620         /*
3621          *  If this is a task management command, there is no
3622          *  scsi packet associated with this lpfc_cmd.  The driver
3623          *  consumes it.
3624          */
3625         if (fcpcmd->fcpCntl2) {
3626                 scsi_status = 0;
3627                 goto out;
3628         }
3629
3630         if (resp_info & RSP_LEN_VALID) {
3631                 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3632                 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3633                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3634                                          "2719 Invalid response length: "
3635                                          "tgt x%x lun x%llx cmnd x%x rsplen "
3636                                          "x%x\n", cmnd->device->id,
3637                                          cmnd->device->lun, cmnd->cmnd[0],
3638                                          rsplen);
3639                         host_status = DID_ERROR;
3640                         goto out;
3641                 }
3642                 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3643                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3644                                  "2757 Protocol failure detected during "
3645                                  "processing of FCP I/O op: "
3646                                  "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3647                                  cmnd->device->id,
3648                                  cmnd->device->lun, cmnd->cmnd[0],
3649                                  fcprsp->rspInfo3);
3650                         host_status = DID_ERROR;
3651                         goto out;
3652                 }
3653         }
3654
3655         if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3656                 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3657                 if (snslen > SCSI_SENSE_BUFFERSIZE)
3658                         snslen = SCSI_SENSE_BUFFERSIZE;
3659
3660                 if (resp_info & RSP_LEN_VALID)
3661                         rsplen = be32_to_cpu(fcprsp->rspRspLen);
3662                 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3663         }
3664         lp = (uint32_t *)cmnd->sense_buffer;
3665
3666         /* special handling for under run conditions */
3667         if (!scsi_status && (resp_info & RESID_UNDER)) {
3668                 /* don't log underruns if only LOG_FCP is set... */
3669                 if (vport->cfg_log_verbose & LOG_FCP)
3670                         logit = LOG_FCP_ERROR;
3671                 /* ...unless the operator asked for LOG_FCP_UNDER */
3672                 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3673                         logit = LOG_FCP_UNDER;
3674         }
3675
3676         lpfc_printf_vlog(vport, KERN_WARNING, logit,
3677                          "9024 FCP command x%x failed: x%x SNS x%x x%x "
3678                          "Data: x%x x%x x%x x%x x%x\n",
3679                          cmnd->cmnd[0], scsi_status,
3680                          be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3681                          be32_to_cpu(fcprsp->rspResId),
3682                          be32_to_cpu(fcprsp->rspSnsLen),
3683                          be32_to_cpu(fcprsp->rspRspLen),
3684                          fcprsp->rspInfo3);
3685
3686         scsi_set_resid(cmnd, 0);
3687         fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3688         if (resp_info & RESID_UNDER) {
3689                 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3690
3691                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3692                                  "9025 FCP Underrun, expected %d, "
3693                                  "residual %d Data: x%x x%x x%x\n",
3694                                  fcpDl,
3695                                  scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3696                                  cmnd->underflow);
3697
3698                 /*
3699                  * If there is an underrun, check whether the underrun
3700                  * reported by the storage array matches the one reported by
3701                  * the HBA. If they differ, a frame was dropped.
3702                  */
3703                 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3704                         lpfc_printf_vlog(vport, KERN_WARNING,
3705                                          LOG_FCP | LOG_FCP_ERROR,
3706                                          "9026 FCP Read Check Error "
3707                                          "and Underrun Data: x%x x%x x%x x%x\n",
3708                                          fcpDl,
3709                                          scsi_get_resid(cmnd), fcpi_parm,
3710                                          cmnd->cmnd[0]);
3711                         scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3712                         host_status = DID_ERROR;
3713                 }
3714                 /*
3715                  * The cmnd->underflow is the minimum number of bytes that must
3716                  * be transferred for this command.  Provided a sense condition
3717                  * is not present, make sure the actual amount transferred is at
3718                  * least the underflow value or fail.
3719                  */
3720                 if (!(resp_info & SNS_LEN_VALID) &&
3721                     (scsi_status == SAM_STAT_GOOD) &&
3722                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3723                      < cmnd->underflow)) {
3724                         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3725                                          "9027 FCP command x%x residual "
3726                                          "underrun converted to error "
3727                                          "Data: x%x x%x x%x\n",
3728                                          cmnd->cmnd[0], scsi_bufflen(cmnd),
3729                                          scsi_get_resid(cmnd), cmnd->underflow);
3730                         host_status = DID_ERROR;
3731                 }
3732         } else if (resp_info & RESID_OVER) {
3733                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3734                                  "9028 FCP command x%x residual overrun error. "
3735                                  "Data: x%x x%x\n", cmnd->cmnd[0],
3736                                  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3737                 host_status = DID_ERROR;
3738
3739         /*
3740          * Check the SLI validation that the entire transfer was actually
3741          * done (fcpi_parm should be zero). Apply the check only to reads.
3742          */
3743         } else if (fcpi_parm) {
3744                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3745                                  "9029 FCP %s Check Error xri x%x  Data: "
3746                                  "x%x x%x x%x x%x x%x\n",
3747                                  ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3748                                  "Read" : "Write"),
3749                                  ((phba->sli_rev == LPFC_SLI_REV4) ?
3750                                  lpfc_cmd->cur_iocbq.sli4_xritag :
3751                                  rsp_iocb->iocb.ulpContext),
3752                                  fcpDl, be32_to_cpu(fcprsp->rspResId),
3753                                  fcpi_parm, cmnd->cmnd[0], scsi_status);
3754
3755                 /* There is some issue with the LPe12000 that causes it
3756                  * to miscalculate the fcpi_parm and falsely trip this
3757                  * recovery logic.  Detect this case and don't error when true.
3758                  */
3759                 if (fcpi_parm > fcpDl)
3760                         goto out;
3761
3762                 switch (scsi_status) {
3763                 case SAM_STAT_GOOD:
3764                 case SAM_STAT_CHECK_CONDITION:
3765                         /* Fabric dropped a data frame. Fail any successful
3766                          * command in which we detected dropped frames.
3767                          * A status of good or some check conditions could
3768                          * be considered a successful command.
3769                          */
3770                         host_status = DID_ERROR;
3771                         break;
3772                 }
3773                 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3774         }
3775
3776  out:
3777         cmnd->result = host_status << 16 | scsi_status;
3778         lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3779 }
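
/*
 * A minimal user-space sketch (not part of the driver) of the result
 * packing performed above: the midlayer convention stores the host byte
 * in bits 23:16 of cmnd->result and the SCSI status byte in bits 7:0.
 * The DID_* and SAM_STAT_* values are copied locally so the sketch
 * compiles on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define DID_OK                   0x00 /* host byte: no host-side error */
#define DID_ERROR                0x07 /* host byte: internal error     */
#define SAM_STAT_CHECK_CONDITION 0x02 /* SCSI status byte              */

static uint32_t pack_result(uint32_t host_status, uint32_t scsi_status)
{
        return host_status << 16 | scsi_status;
}

int main(void)
{
        uint32_t r = pack_result(DID_ERROR, SAM_STAT_CHECK_CONDITION);

        /* prints result=0x070002 host=0x07 status=0x02 */
        printf("result=0x%06x host=0x%02x status=0x%02x\n",
               r, (r >> 16) & 0xff, r & 0xff);
        return 0;
}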
3780
3781 /**
3782  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3783  * @phba: The Hba for which this call is being executed.
3784  * @pIocbIn: The command IOCBQ for the scsi cmnd.
3785  * @pIocbOut: The response IOCBQ for the scsi cmnd.
3786  *
3787  * This routine derives the scsi command result from the response IOCB
3788  * status field. It also handles the QUEUE FULL condition by ramping
3789  * down the device queue depth.
3790  **/
3791 static void
3792 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3793                         struct lpfc_iocbq *pIocbOut)
3794 {
3795         struct lpfc_io_buf *lpfc_cmd =
3796                 (struct lpfc_io_buf *) pIocbIn->context1;
3797         struct lpfc_vport      *vport = pIocbIn->vport;
3798         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3799         struct lpfc_nodelist *pnode = rdata->pnode;
3800         struct scsi_cmnd *cmd;
3801         unsigned long flags;
3802         struct lpfc_fast_path_event *fast_path_evt;
3803         struct Scsi_Host *shost;
3804         int idx;
3805         uint32_t logit = LOG_FCP;
3806
3807         /* Guard against abort handler being called at same time */
3808         spin_lock(&lpfc_cmd->buf_lock);
3809
3810         /* Sanity check on return of outstanding command */
3811         cmd = lpfc_cmd->pCmd;
3812         if (!cmd || !phba) {
3813                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3814                                  "2621 IO completion: Not an active IO\n");
3815                 spin_unlock(&lpfc_cmd->buf_lock);
3816                 return;
3817         }
3818
3819         idx = lpfc_cmd->cur_iocbq.hba_wqidx;
3820         if (phba->sli4_hba.hdwq)
3821                 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
3822
3823 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3824         if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
3825                 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
3826 #endif
3827         shost = cmd->device->host;
3828
3829         lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3830         lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3831         /* pick up SLI4 exchange busy status from HBA */
3832         if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
3833                 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
3834         else
3835                 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
3836
3837 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3838         if (lpfc_cmd->prot_data_type) {
3839                 struct scsi_dif_tuple *src = NULL;
3840
3841                 src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3842                 /*
3843                  * Used to restore any changes to protection
3844                  * data for error injection.
3845                  */
3846                 switch (lpfc_cmd->prot_data_type) {
3847                 case LPFC_INJERR_REFTAG:
3848                         src->ref_tag =
3849                                 lpfc_cmd->prot_data;
3850                         break;
3851                 case LPFC_INJERR_APPTAG:
3852                         src->app_tag =
3853                                 (uint16_t)lpfc_cmd->prot_data;
3854                         break;
3855                 case LPFC_INJERR_GUARD:
3856                         src->guard_tag =
3857                                 (uint16_t)lpfc_cmd->prot_data;
3858                         break;
3859                 default:
3860                         break;
3861                 }
3862
3863                 lpfc_cmd->prot_data = 0;
3864                 lpfc_cmd->prot_data_type = 0;
3865                 lpfc_cmd->prot_data_segment = NULL;
3866         }
3867 #endif
3868
3869         if (unlikely(lpfc_cmd->status)) {
3870                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3871                     (lpfc_cmd->result & IOERR_DRVR_MASK))
3872                         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3873                 else if (lpfc_cmd->status >= IOSTAT_CNT)
3874                         lpfc_cmd->status = IOSTAT_DEFAULT;
3875                 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3876                     !lpfc_cmd->fcp_rsp->rspStatus3 &&
3877                     (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3878                     !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3879                         logit = 0;
3880                 else
3881                         logit = LOG_FCP | LOG_FCP_UNDER;
3882                 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3883                          "9030 FCP cmd x%x failed <%d/%lld> "
3884                          "status: x%x result: x%x "
3885                          "sid: x%x did: x%x oxid: x%x "
3886                          "Data: x%x x%x\n",
3887                          cmd->cmnd[0],
3888                          cmd->device ? cmd->device->id : 0xffff,
3889                          cmd->device ? cmd->device->lun : 0xffff,
3890                          lpfc_cmd->status, lpfc_cmd->result,
3891                          vport->fc_myDID,
3892                          (pnode) ? pnode->nlp_DID : 0,
3893                          phba->sli_rev == LPFC_SLI_REV4 ?
3894                              lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3895                          pIocbOut->iocb.ulpContext,
3896                          lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3897
3898                 switch (lpfc_cmd->status) {
3899                 case IOSTAT_FCP_RSP_ERROR:
3900                         /* Call FCP RSP handler to determine result */
3901                         lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
3902                         break;
3903                 case IOSTAT_NPORT_BSY:
3904                 case IOSTAT_FABRIC_BSY:
3905                         cmd->result = DID_TRANSPORT_DISRUPTED << 16;
3906                         fast_path_evt = lpfc_alloc_fast_evt(phba);
3907                         if (!fast_path_evt)
3908                                 break;
3909                         fast_path_evt->un.fabric_evt.event_type =
3910                                 FC_REG_FABRIC_EVENT;
3911                         fast_path_evt->un.fabric_evt.subcategory =
3912                                 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3913                                 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3914                         if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3915                                 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3916                                         &pnode->nlp_portname,
3917                                         sizeof(struct lpfc_name));
3918                                 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3919                                         &pnode->nlp_nodename,
3920                                         sizeof(struct lpfc_name));
3921                         }
3922                         fast_path_evt->vport = vport;
3923                         fast_path_evt->work_evt.evt =
3924                                 LPFC_EVT_FASTPATH_MGMT_EVT;
3925                         spin_lock_irqsave(&phba->hbalock, flags);
3926                         list_add_tail(&fast_path_evt->work_evt.evt_listp,
3927                                 &phba->work_list);
3928                         spin_unlock_irqrestore(&phba->hbalock, flags);
3929                         lpfc_worker_wake_up(phba);
3930                         break;
3931                 case IOSTAT_LOCAL_REJECT:
3932                 case IOSTAT_REMOTE_STOP:
3933                         if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3934                             lpfc_cmd->result ==
3935                                         IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3936                             lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3937                             lpfc_cmd->result ==
3938                                         IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3939                                 cmd->result = DID_NO_CONNECT << 16;
3940                                 break;
3941                         }
3942                         if (lpfc_cmd->result == IOERR_INVALID_RPI ||
3943                             lpfc_cmd->result == IOERR_NO_RESOURCES ||
3944                             lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3945                             lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
3946                                 cmd->result = DID_REQUEUE << 16;
3947                                 break;
3948                         }
3949                         if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3950                              lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3951                              pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3952                                 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3953                                         /*
3954                                          * This is a response for a BG enabled
3955                                          * cmd. Parse BG error
3956                                          */
3957                                         lpfc_parse_bg_err(phba, lpfc_cmd,
3958                                                         pIocbOut);
3959                                         break;
3960                                 } else {
3961                                         lpfc_printf_vlog(vport, KERN_WARNING,
3962                                                         LOG_BG,
3963                                                         "9031 non-zero BGSTAT "
3964                                                         "on unprotected cmd\n");
3965                                 }
3966                         }
3967                         if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3968                                 && (phba->sli_rev == LPFC_SLI_REV4)
3969                                 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3970                         /* This IO was aborted by the target; we don't
3971                          * know the rxid, and because we did not send the
3972                          * ABTS we cannot generate an RRQ.
3973                                  */
3974                                 lpfc_set_rrq_active(phba, pnode,
3975                                         lpfc_cmd->cur_iocbq.sli4_lxritag,
3976                                         0, 0);
3977                         }
3978                         fallthrough;
3979                 default:
3980                         cmd->result = DID_ERROR << 16;
3981                         break;
3982                 }
3983
3984                 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
3985                     || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3986                         cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
3987                                       SAM_STAT_BUSY;
3988         } else
3989                 cmd->result = DID_OK << 16;
3990
3991         if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3992                 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3993
3994                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3995                                  "0710 Iodone <%d/%llu> cmd x%px, error "
3996                                  "x%x SNS x%x x%x Data: x%x x%x\n",
3997                                  cmd->device->id, cmd->device->lun, cmd,
3998                                  cmd->result, *lp, *(lp + 3), cmd->retries,
3999                                  scsi_get_resid(cmd));
4000         }
4001
4002         lpfc_update_stats(vport, lpfc_cmd);
4003         if (vport->cfg_max_scsicmpl_time &&
4004            time_after(jiffies, lpfc_cmd->start_time +
4005                 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4006                 spin_lock_irqsave(shost->host_lock, flags);
4007                 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4008                         if (pnode->cmd_qdepth >
4009                                 atomic_read(&pnode->cmd_pending) &&
4010                                 (atomic_read(&pnode->cmd_pending) >
4011                                 LPFC_MIN_TGT_QDEPTH) &&
4012                                 ((cmd->cmnd[0] == READ_10) ||
4013                                 (cmd->cmnd[0] == WRITE_10)))
4014                                 pnode->cmd_qdepth =
4015                                         atomic_read(&pnode->cmd_pending);
4016
4017                         pnode->last_change_time = jiffies;
4018                 }
4019                 spin_unlock_irqrestore(shost->host_lock, flags);
4020         }
4021         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4022
4023         lpfc_cmd->pCmd = NULL;
4024         spin_unlock(&lpfc_cmd->buf_lock);
4025
4026 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4027         if (lpfc_cmd->ts_cmd_start) {
4028                 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4029                 lpfc_cmd->ts_data_io = ktime_get_ns();
4030                 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4031                 lpfc_io_ktime(phba, lpfc_cmd);
4032         }
4033 #endif
4034         /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4035         cmd->scsi_done(cmd);
4036
4037         /*
4038          * If there is an abort thread waiting for command completion
4039          * wake up the thread.
4040          */
4041         spin_lock(&lpfc_cmd->buf_lock);
4042         lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4043         if (lpfc_cmd->waitq)
4044                 wake_up(lpfc_cmd->waitq);
4045         spin_unlock(&lpfc_cmd->buf_lock);
4046
4047         lpfc_release_scsi_buf(phba, lpfc_cmd);
4048 }
4049
4050 /**
4051  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4052  * @data: A pointer to the immediate command data portion of the IOCB.
4053  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4054  *
4055  * The routine copies the entire FCP command from @fcp_cmnd to @data while
4056  * byte swapping the data to big endian format for transmission on the wire.
4057  **/
4058 static void
4059 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4060 {
4061         int i, j;
4062         for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4063              i += sizeof(uint32_t), j++) {
4064                 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4065         }
4066 }
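
/*
 * A minimal user-space sketch (not part of the driver) of the word-wise
 * big-endian copy done by lpfc_fcpcmd_to_iocb() above; htonl() stands in
 * for the kernel's cpu_to_be32(). The length is assumed to be a multiple
 * of four bytes, as sizeof(struct fcp_cmnd) is.
 */
#include <arpa/inet.h>  /* htonl */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_to_wire_be32(uint8_t *data, const uint32_t *src, size_t len)
{
        size_t i, j;

        for (i = 0, j = 0; i < len; i += sizeof(uint32_t), j++) {
                uint32_t be = htonl(src[j]);

                memcpy(data + i, &be, sizeof(be));
        }
}

int main(void)
{
        uint32_t cmd[2] = { 0x01020304, 0xa1b2c3d4 };
        uint8_t wire[sizeof(cmd)];

        copy_to_wire_be32(wire, cmd, sizeof(cmd));
        /* the most significant byte leads on the wire on any host */
        printf("wire[0]=0x%02x wire[4]=0x%02x\n", wire[0], wire[4]);
        return 0;
}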
4067
4068 /**
4069  * lpfc_scsi_prep_cmnd - Wrapper routine to convert a scsi cmnd to an FCP info unit
4070  * @vport: The virtual port for which this call is being executed.
4071  * @lpfc_cmd: The scsi command to be sent.
4072  * @pnode: Pointer to lpfc_nodelist.
4073  *
4074  * This routine initializes the fcp_cmnd and iocb data structures from the
4075  * scsi command for transfer to a device with the SLI-3 interface spec.
4076  **/
4077 static void
4078 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4079                     struct lpfc_nodelist *pnode)
4080 {
4081         struct lpfc_hba *phba = vport->phba;
4082         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4083         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4084         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4085         struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4086         struct lpfc_sli4_hdw_queue *hdwq = NULL;
4087         int datadir = scsi_cmnd->sc_data_direction;
4088         int idx;
4089         uint8_t *ptr;
4090         bool sli4;
4091         uint32_t fcpdl;
4092
4093         if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4094                 return;
4095
4096         lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4097         /* clear task management bits */
4098         lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4099
4100         int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4101                         &lpfc_cmd->fcp_cmnd->fcp_lun);
4102
4103         ptr = &fcp_cmnd->fcpCdb[0];
4104         memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4105         if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4106                 ptr += scsi_cmnd->cmd_len;
4107                 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4108         }
4109
4110         fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4111
4112         sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4113         piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4114         idx = lpfc_cmd->hdwq_no;
4115         if (phba->sli4_hba.hdwq)
4116                 hdwq = &phba->sli4_hba.hdwq[idx];
4117
4118         /*
4119          * There are three possibilities here - use scatter-gather segment, use
4120          * the single mapping, or neither.  Start the lpfc command prep by
4121          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4122          * data bde entry.
4123          */
4124         if (scsi_sg_count(scsi_cmnd)) {
4125                 if (datadir == DMA_TO_DEVICE) {
4126                         iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4127                         iocb_cmd->ulpPU = PARM_READ_CHECK;
4128                         if (vport->cfg_first_burst_size &&
4129                             (pnode->nlp_flag & NLP_FIRSTBURST)) {
4130                                 fcpdl = scsi_bufflen(scsi_cmnd);
4131                                 if (fcpdl < vport->cfg_first_burst_size)
4132                                         piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
4133                                 else
4134                                         piocbq->iocb.un.fcpi.fcpi_XRdy =
4135                                                 vport->cfg_first_burst_size;
4136                         }
4137                         fcp_cmnd->fcpCntl3 = WRITE_DATA;
4138                         if (hdwq)
4139                                 hdwq->scsi_cstat.output_requests++;
4140                 } else {
4141                         iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4142                         iocb_cmd->ulpPU = PARM_READ_CHECK;
4143                         fcp_cmnd->fcpCntl3 = READ_DATA;
4144                         if (hdwq)
4145                                 hdwq->scsi_cstat.input_requests++;
4146                 }
4147         } else {
4148                 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4149                 iocb_cmd->un.fcpi.fcpi_parm = 0;
4150                 iocb_cmd->ulpPU = 0;
4151                 fcp_cmnd->fcpCntl3 = 0;
4152                 if (hdwq)
4153                         hdwq->scsi_cstat.control_requests++;
4154         }
4155         if (phba->sli_rev == 3 &&
4156             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4157                 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4158         /*
4159          * Finish initializing those IOCB fields that are independent
4160          * of the scsi_cmnd request_buffer
4161          */
4162         piocbq->iocb.ulpContext = pnode->nlp_rpi;
4163         if (sli4)
4164                 piocbq->iocb.ulpContext =
4165                   phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4166         if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4167                 piocbq->iocb.ulpFCP2Rcvy = 1;
4168         else
4169                 piocbq->iocb.ulpFCP2Rcvy = 0;
4170
4171         piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4172         piocbq->context1  = lpfc_cmd;
4173         piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4174         piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4175         piocbq->vport = vport;
4176 }
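
/*
 * A minimal user-space sketch (not part of the driver) of the CDB
 * handling in lpfc_scsi_prep_cmnd() above: the variable-length SCSI CDB
 * is copied into the fixed-width FCP_CMND CDB field and any remainder is
 * zeroed. The 16-byte width assumes the driver's LPFC_FCP_CDB_LEN; the
 * names below are local to the sketch.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FCP_CDB_LEN 16  /* fixed CDB field width in the FCP_CMND IU */

static void fill_fcp_cdb(uint8_t field[FCP_CDB_LEN],
                         const uint8_t *cdb, size_t cdb_len)
{
        memcpy(field, cdb, cdb_len);
        if (cdb_len < FCP_CDB_LEN)
                memset(field + cdb_len, 0, FCP_CDB_LEN - cdb_len);
}

int main(void)
{
        /* READ(10) is a 10-byte CDB, so bytes 10..15 must end up zero */
        uint8_t read10[10] = { 0x28, 0, 0, 0, 0, 8, 0, 0, 1, 0 };
        uint8_t field[FCP_CDB_LEN];

        fill_fcp_cdb(field, read10, sizeof(read10));
        printf("field[0]=0x%02x field[15]=0x%02x\n", field[0], field[15]);
        return 0;
}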
4177
4178 /**
4179  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4180  * @vport: The virtual port for which this call is being executed.
4181  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4182  * @lun: Logical unit number.
4183  * @task_mgmt_cmd: SCSI task management command.
4184  *
4185  * This routine creates the FCP information unit corresponding to
4186  * @task_mgmt_cmd for a device with the SLI-3 interface spec.
4187  *
4188  * Return codes:
4189  *   0 - Error
4190  *   1 - Success
4191  **/
4192 static int
4193 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4194                              struct lpfc_io_buf *lpfc_cmd,
4195                              uint64_t lun,
4196                              uint8_t task_mgmt_cmd)
4197 {
4198         struct lpfc_iocbq *piocbq;
4199         IOCB_t *piocb;
4200         struct fcp_cmnd *fcp_cmnd;
4201         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4202         struct lpfc_nodelist *ndlp = rdata->pnode;
4203
4204         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4205             ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4206                 return 0;
4207
4208         piocbq = &(lpfc_cmd->cur_iocbq);
4209         piocbq->vport = vport;
4210
4211         piocb = &piocbq->iocb;
4212
4213         fcp_cmnd = lpfc_cmd->fcp_cmnd;
4214         /* Clear out any old data in the FCP command area */
4215         memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4216         int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4217         fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4218         if (vport->phba->sli_rev == 3 &&
4219             !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4220                 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4221         piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4222         piocb->ulpContext = ndlp->nlp_rpi;
4223         if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4224                 piocb->ulpContext =
4225                   vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4226         }
4227         piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4228         piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4229         piocb->ulpPU = 0;
4230         piocb->un.fcpi.fcpi_parm = 0;
4231
4232         /* ulpTimeout is only one byte */
4233         if (lpfc_cmd->timeout > 0xff) {
4234                 /*
4235                  * Do not timeout the command at the firmware level.
4236                  * The driver will provide the timeout mechanism.
4237                  */
4238                 piocb->ulpTimeout = 0;
4239         } else
4240                 piocb->ulpTimeout = lpfc_cmd->timeout;
4241
4242         if (vport->phba->sli_rev == LPFC_SLI_REV4)
4243                 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4244
4245         return 1;
4246 }
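
/*
 * A minimal sketch (not part of the driver) of the ulpTimeout handling
 * above: the IOCB carries only a single timeout byte, so values that do
 * not fit are encoded as 0, meaning the firmware does not time the
 * command and the driver supplies the timeout mechanism instead.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t encode_ulp_timeout(unsigned int timeout)
{
        return timeout > 0xff ? 0 : (uint8_t)timeout;
}

int main(void)
{
        printf("60s  -> 0x%02x\n", encode_ulp_timeout(60));  /* 0x3c */
        printf("300s -> 0x%02x\n", encode_ulp_timeout(300)); /* 0x00, driver-timed */
        return 0;
}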
4247
4248 /**
4249  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4250  * @phba: The hba struct for which this call is being executed.
4251  * @dev_grp: The HBA PCI-Device group number.
4252  *
4253  * This routine sets up the SCSI interface API function jump table in @phba
4254  * struct.
4255  * Returns: 0 - success, -ENODEV - failure.
4256  **/
4257 int
4258 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4259 {
4260
4261         phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4262         phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4263
4264         switch (dev_grp) {
4265         case LPFC_PCI_DEV_LP:
4266                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4267                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4268                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4269                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4270                 break;
4271         case LPFC_PCI_DEV_OC:
4272                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4273                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4274                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4275                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4276                 break;
4277         default:
4278                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4279                                 "1418 Invalid HBA PCI-device group: 0x%x\n",
4280                                 dev_grp);
4281                 return -ENODEV;
4282                 break;
4283         }
4284         phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4285         phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4286         return 0;
4287 }
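
/*
 * A minimal user-space sketch (not part of the driver) of the jump-table
 * pattern used by lpfc_scsi_api_table_setup() above: per-generation
 * handlers are bound once at setup time so the I/O path can dispatch
 * through function pointers without rechecking the device group. All
 * names below are local to the sketch.
 */
#include <errno.h>
#include <stdio.h>

enum dev_grp { DEV_GRP_LP, DEV_GRP_OC };

struct scsi_ops {
        void (*prep_dma_buf)(void);
};

static void prep_dma_buf_s3(void) { puts("SLI-3 DMA prep"); }
static void prep_dma_buf_s4(void) { puts("SLI-4 DMA prep"); }

static int scsi_api_table_setup(struct scsi_ops *ops, enum dev_grp grp)
{
        switch (grp) {
        case DEV_GRP_LP:
                ops->prep_dma_buf = prep_dma_buf_s3;
                break;
        case DEV_GRP_OC:
                ops->prep_dma_buf = prep_dma_buf_s4;
                break;
        default:
                return -ENODEV;
        }
        return 0;
}

int main(void)
{
        struct scsi_ops ops;

        if (!scsi_api_table_setup(&ops, DEV_GRP_OC))
                ops.prep_dma_buf();     /* dispatches to the SLI-4 variant */
        return 0;
}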
4288
4289 /**
4290  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
4291  * @phba: The Hba for which this call is being executed.
4292  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4293  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4294  *
4295  * This routine is the IOCB completion routine for the device reset and
4296  * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
4297  **/
4298 static void
4299 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4300                         struct lpfc_iocbq *cmdiocbq,
4301                         struct lpfc_iocbq *rspiocbq)
4302 {
4303         struct lpfc_io_buf *lpfc_cmd =
4304                 (struct lpfc_io_buf *) cmdiocbq->context1;
4305         if (lpfc_cmd)
4306                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4307         return;
4308 }
4309
4310 /**
4311  * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
4312  *                             if issuing a pci_bus_reset is possibly unsafe
4313  * @phba: lpfc_hba pointer.
4314  *
4315  * Description:
4316  * Walks the bus_list to ensure that only PCI devices with an Emulex
4317  * vendor id, device ids that support hot reset, and a single occurrence
4318  * of function 0 are present.
4319  *
4320  * Returns:
4321  * -EBADSLT,  detected invalid device
4322  *      0,    successful
4323  */
4324 int
4325 lpfc_check_pci_resettable(struct lpfc_hba *phba)
4326 {
4327         const struct pci_dev *pdev = phba->pcidev;
4328         struct pci_dev *ptr = NULL;
4329         u8 counter = 0;
4330
4331         /* Walk the list of devices on the pci_dev's bus */
4332         list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4333                 /* Check for Emulex Vendor ID */
4334                 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4335                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4336                                         "8346 Non-Emulex vendor found: "
4337                                         "0x%04x\n", ptr->vendor);
4338                         return -EBADSLT;
4339                 }
4340
4341                 /* Check for valid Emulex Device ID */
4342                 switch (ptr->device) {
4343                 case PCI_DEVICE_ID_LANCER_FC:
4344                 case PCI_DEVICE_ID_LANCER_G6_FC:
4345                 case PCI_DEVICE_ID_LANCER_G7_FC:
4346                         break;
4347                 default:
4348                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4349                                         "8347 Invalid device found: "
4350                                         "0x%04x\n", ptr->device);
4351                         return -EBADSLT;
4352                 }
4353
4354                 /* Check for only one function 0 ID to ensure only one HBA on
4355                  * secondary bus
4356                  */
4357                 if (ptr->devfn == 0) {
4358                         if (++counter > 1) {
4359                                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4360                                                 "8348 More than one device on "
4361                                                 "secondary bus found\n");
4362                                 return -EBADSLT;
4363                         }
4364                 }
4365         }
4366
4367         return 0;
4368 }
4369
4370 /**
4371  * lpfc_info - Info entry point of scsi_host_template data structure
4372  * @host: The scsi host for which this call is being executed.
4373  *
4374  * This routine provides module information about the hba.
4375  *
4376  * Return code:
4377  *   Pointer to char - Success.
4378  **/
4379 const char *
4380 lpfc_info(struct Scsi_Host *host)
4381 {
4382         struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4383         struct lpfc_hba   *phba = vport->phba;
4384         int link_speed = 0;
4385         static char lpfcinfobuf[384];
4386         char tmp[384] = {0};
4387
4388         memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
4389         if (phba && phba->pcidev){
4390                 /* Model Description */
4391                 scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
4392                 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4393                     sizeof(lpfcinfobuf))
4394                         goto buffer_done;
4395
4396                 /* PCI Info */
4397                 scnprintf(tmp, sizeof(tmp),
4398                           " on PCI bus %02x device %02x irq %d",
4399                           phba->pcidev->bus->number, phba->pcidev->devfn,
4400                           phba->pcidev->irq);
4401                 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4402                     sizeof(lpfcinfobuf))
4403                         goto buffer_done;
4404
4405                 /* Port Number */
4406                 if (phba->Port[0]) {
4407                         scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
4408                         if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4409                             sizeof(lpfcinfobuf))
4410                                 goto buffer_done;
4411                 }
4412
4413                 /* Link Speed */
4414                 link_speed = lpfc_sli_port_speed_get(phba);
4415                 if (link_speed != 0) {
4416                         scnprintf(tmp, sizeof(tmp),
4417                                   " Logical Link Speed: %d Mbps", link_speed);
4418                         if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4419                             sizeof(lpfcinfobuf))
4420                                 goto buffer_done;
4421                 }
4422
4423                 /* PCI resettable */
4424                 if (!lpfc_check_pci_resettable(phba)) {
4425                         scnprintf(tmp, sizeof(tmp), " PCI resettable");
4426                         strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
4427                 }
4428         }
4429
4430 buffer_done:
4431         return lpfcinfobuf;
4432 }
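
/*
 * A minimal user-space sketch (not part of the driver) of the
 * append-and-check pattern lpfc_info() uses above: strlcat() returns the
 * total length it tried to create, so a return value >= the destination
 * size means the string was truncated and further appends can stop.
 * glibc has no strlcat(), so a small local equivalent is included.
 */
#include <stdio.h>
#include <string.h>

static size_t local_strlcat(char *dst, const char *src, size_t size)
{
        size_t dlen = strnlen(dst, size);
        size_t slen = strlen(src);

        if (dlen == size)               /* dst was not even terminated */
                return size + slen;
        snprintf(dst + dlen, size - dlen, "%s", src);
        return dlen + slen;             /* length it tried to create */
}

int main(void)
{
        char info[16] = "LPe12000";

        if (local_strlcat(info, " on PCI bus 02", sizeof(info)) >= sizeof(info))
                printf("truncated: \"%s\"\n", info);
        return 0;
}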
4433
4434 /**
4435  * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
4436  * @phba: The Hba for which this call is being executed.
4437  *
4438  * This routine modifies the fcp_poll_timer field of @phba using cfg_poll_tmo.
4439  * The default value of cfg_poll_tmo is 10 milliseconds.
4440  **/
4441 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4442 {
4443         unsigned long  poll_tmo_expires =
4444                 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4445
4446         if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4447                 mod_timer(&phba->fcp_poll_timer,
4448                           poll_tmo_expires);
4449 }
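
/*
 * A minimal user-space sketch (not part of the driver) of the wrap-safe
 * tick arithmetic behind "jiffies + msecs_to_jiffies(...)" above and the
 * time_after() test used elsewhere in this file: comparing via signed
 * subtraction stays correct even when the unsigned counter wraps.
 */
#include <stdio.h>

typedef unsigned long jiffies_t;

/* nonzero if a is after b, correct across counter wrap-around */
static int ticks_after(jiffies_t a, jiffies_t b)
{
        return (long)(b - a) < 0;
}

int main(void)
{
        jiffies_t now = (jiffies_t)-5;  /* five ticks before wrap-around */
        jiffies_t expires = now + 10;   /* wraps past zero               */

        printf("now:       %d\n", ticks_after(now, expires));      /* 0 */
        printf("+10 ticks: %d\n", ticks_after(now + 10, expires)); /* 0 (equal) */
        printf("+11 ticks: %d\n", ticks_after(now + 11, expires)); /* 1 */
        return 0;
}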
4450
4451 /**
4452  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4453  * @phba: The Hba for which this call is being executed.
4454  *
4455  * This routine starts the fcp_poll_timer of @phba.
4456  **/
4457 void lpfc_poll_start_timer(struct lpfc_hba * phba)
4458 {
4459         lpfc_poll_rearm_timer(phba);
4460 }
4461
4462 /**
4463  * lpfc_poll_timeout - Restart polling timer
4464  * @t: Pointer to the timer_list embedded in the lpfc_hba structure.
4465  *
4466  * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4467  * and the FCP ring interrupt is disabled.
4468  **/
4469
4470 void lpfc_poll_timeout(struct timer_list *t)
4471 {
4472         struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
4473
4474         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4475                 lpfc_sli_handle_fast_ring_event(phba,
4476                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4477
4478                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4479                         lpfc_poll_rearm_timer(phba);
4480         }
4481 }
4482
4483 /**
4484  * lpfc_queuecommand - scsi_host_template queuecommand entry point
4485  * @shost: Pointer to Scsi_Host data structure.
4486  * @cmnd: Pointer to scsi_cmnd data structure.
4487  *
4488  * The scsi midlayer calls this routine to submit @cmnd for processing.
4489  * This routine prepares an IOCB from the scsi command and provides it to
4490  * the firmware; cmnd->scsi_done() is invoked when processing completes.
4491  *
4492  * Return value :
4493  *   0 - Success
4494  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4495  **/
4496 static int
4497 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4498 {
4499         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4500         struct lpfc_hba   *phba = vport->phba;
4501         struct lpfc_rport_data *rdata;
4502         struct lpfc_nodelist *ndlp;
4503         struct lpfc_io_buf *lpfc_cmd;
4504         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4505         int err, idx;
4506 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4507         uint64_t start = 0L;
4508
4509         if (phba->ktime_on)
4510                 start = ktime_get_ns();
4511 #endif
4512
4513         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4514
4515         /* sanity check on references */
4516         if (unlikely(!rdata) || unlikely(!rport))
4517                 goto out_fail_command;
4518
4519         err = fc_remote_port_chkready(rport);
4520         if (err) {
4521                 cmnd->result = err;
4522                 goto out_fail_command;
4523         }
4524         ndlp = rdata->pnode;
4525
4526         if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4527                 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4528
4529                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4530                                 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4531                                 " op:%02x str=%s without registering for"
4532                                 " BlockGuard - Rejecting command\n",
4533                                 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4534                                 dif_op_str[scsi_get_prot_op(cmnd)]);
4535                 goto out_fail_command;
4536         }
4537
4538         /*
4539          * Catch race where our node has transitioned, but the
4540          * transport is still transitioning.
4541          */
4542         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4543                 goto out_tgt_busy;
4544         if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
4545                 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
4546                         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4547                                          "3377 Target Queue Full, scsi Id:%d "
4548                                          "Qdepth:%d Pending command:%d"
4549                                          " WWNN:%02x:%02x:%02x:%02x:"
4550                                          "%02x:%02x:%02x:%02x, "
4551                                          " WWPN:%02x:%02x:%02x:%02x:"
4552                                          "%02x:%02x:%02x:%02x",
4553                                          ndlp->nlp_sid, ndlp->cmd_qdepth,
4554                                          atomic_read(&ndlp->cmd_pending),
4555                                          ndlp->nlp_nodename.u.wwn[0],
4556                                          ndlp->nlp_nodename.u.wwn[1],
4557                                          ndlp->nlp_nodename.u.wwn[2],
4558                                          ndlp->nlp_nodename.u.wwn[3],
4559                                          ndlp->nlp_nodename.u.wwn[4],
4560                                          ndlp->nlp_nodename.u.wwn[5],
4561                                          ndlp->nlp_nodename.u.wwn[6],
4562                                          ndlp->nlp_nodename.u.wwn[7],
4563                                          ndlp->nlp_portname.u.wwn[0],
4564                                          ndlp->nlp_portname.u.wwn[1],
4565                                          ndlp->nlp_portname.u.wwn[2],
4566                                          ndlp->nlp_portname.u.wwn[3],
4567                                          ndlp->nlp_portname.u.wwn[4],
4568                                          ndlp->nlp_portname.u.wwn[5],
4569                                          ndlp->nlp_portname.u.wwn[6],
4570                                          ndlp->nlp_portname.u.wwn[7]);
4571                         goto out_tgt_busy;
4572                 }
4573         }
4574
4575         lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
4576         if (lpfc_cmd == NULL) {
4577                 lpfc_rampdown_queue_depth(phba);
4578
4579                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4580                                  "0707 driver's buffer pool is empty, "
4581                                  "IO busied\n");
4582                 goto out_host_busy;
4583         }
4584
4585         /*
4586          * Store the midlayer's command structure for the completion phase
4587          * and complete the command initialization.
4588          */
4589         lpfc_cmd->pCmd  = cmnd;
4590         lpfc_cmd->rdata = rdata;
4591         lpfc_cmd->ndlp = ndlp;
4592         cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4593
4594         if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4595                 if (vport->phba->cfg_enable_bg) {
4596                         lpfc_printf_vlog(vport,
4597                                          KERN_INFO, LOG_SCSI_CMD,
4598                                          "9033 BLKGRD: rcvd %s cmd:x%x "
4599                                          "sector x%llx cnt %u pt %x\n",
4600                                          dif_op_str[scsi_get_prot_op(cmnd)],
4601                                          cmnd->cmnd[0],
4602                                          (unsigned long long)scsi_get_lba(cmnd),
4603                                          blk_rq_sectors(cmnd->request),
4604                                          (cmnd->cmnd[1]>>5));
4605                 }
4606                 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4607         } else {
4608                 if (vport->phba->cfg_enable_bg) {
4609                         lpfc_printf_vlog(vport,
4610                                          KERN_INFO, LOG_SCSI_CMD,
4611                                          "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4612                                          "x%x sector x%llx cnt %u pt %x\n",
4613                                          cmnd->cmnd[0],
4614                                          (unsigned long long)scsi_get_lba(cmnd),
4615                                          blk_rq_sectors(cmnd->request),
4616                                          (cmnd->cmnd[1]>>5));
4617                 }
4618                 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4619         }
4620
4621         if (unlikely(err)) {
4622                 if (err == 2) {
4623                         cmnd->result = DID_ERROR << 16;
4624                         goto out_fail_command_release_buf;
4625                 }
4626                 goto out_host_busy_free_buf;
4627         }
4628
4629         lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4630
4631 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4632         if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4633                 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
4634 #endif
4635         err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4636                                   &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4637 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4638         if (start) {
4639                 lpfc_cmd->ts_cmd_start = start;
4640                 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
4641                 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
4642         } else {
4643                 lpfc_cmd->ts_cmd_start = 0;
4644         }
4645 #endif
4646         if (err) {
4647                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4648                                  "3376 FCP could not issue IOCB err %x "
4649                                  "FCP cmd x%x <%d/%llu> "
4650                                  "sid: x%x did: x%x oxid: x%x "
4651                                  "Data: x%x x%x x%x x%x\n",
4652                                  err, cmnd->cmnd[0],
4653                                  cmnd->device ? cmnd->device->id : 0xffff,
4654                                  cmnd->device ? cmnd->device->lun : (u64) -1,
4655                                  vport->fc_myDID, ndlp->nlp_DID,
4656                                  phba->sli_rev == LPFC_SLI_REV4 ?
4657                                  lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4658                                  lpfc_cmd->cur_iocbq.iocb.ulpContext,
4659                                  lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
4660                                  lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
4661                                  (uint32_t)
4662                                  (cmnd->request->timeout / 1000));
4663
4664                 goto out_host_busy_free_buf;
4665         }
4666         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4667                 lpfc_sli_handle_fast_ring_event(phba,
4668                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4669
4670                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4671                         lpfc_poll_rearm_timer(phba);
4672         }
4673
4674         if (phba->cfg_xri_rebalancing)
4675                 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
4676
4677         return 0;
4678
4679  out_host_busy_free_buf:
4680         idx = lpfc_cmd->hdwq_no;
4681         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4682         if (phba->sli4_hba.hdwq) {
4683                 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
4684                 case WRITE_DATA:
4685                         phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
4686                         break;
4687                 case READ_DATA:
4688                         phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
4689                         break;
4690                 default:
4691                         phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
4692                 }
4693         }
4694         lpfc_release_scsi_buf(phba, lpfc_cmd);
4695  out_host_busy:
4696         return SCSI_MLQUEUE_HOST_BUSY;
4697
4698  out_tgt_busy:
4699         return SCSI_MLQUEUE_TARGET_BUSY;
4700
4701  out_fail_command_release_buf:
4702         lpfc_release_scsi_buf(phba, lpfc_cmd);
4703
4704  out_fail_command:
4705         cmnd->scsi_done(cmnd);
4706         return 0;
4707 }
4708
4709
4710 /**
4711  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4712  * @cmnd: Pointer to scsi_cmnd data structure.
4713  *
4714  * This routine aborts the @cmnd pending in the base driver.
4715  *
4716  * Return code :
4717  *   0x2003 - Error
4718  *   0x2002 - Success
4719  **/
4720 static int
4721 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4722 {
4723         struct Scsi_Host  *shost = cmnd->device->host;
4724         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4725         struct lpfc_hba   *phba = vport->phba;
4726         struct lpfc_iocbq *iocb;
4727         struct lpfc_iocbq *abtsiocb;
4728         struct lpfc_io_buf *lpfc_cmd;
4729         IOCB_t *cmd, *icmd;
4730         int ret = SUCCESS, status = 0;
4731         struct lpfc_sli_ring *pring_s4 = NULL;
4732         int ret_val;
4733         unsigned long flags;
4734         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4735
4736         status = fc_block_scsi_eh(cmnd);
4737         if (status != 0 && status != SUCCESS)
4738                 return status;
4739
4740         lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
4741         if (!lpfc_cmd)
4742                 return ret;
4743
4744         spin_lock_irqsave(&phba->hbalock, flags);
4745         /* driver queued commands are in process of being flushed */
4746         if (phba->hba_flag & HBA_IOQ_FLUSH) {
4747                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4748                         "3168 SCSI Layer abort requested I/O has been "
4749                         "flushed by LLD.\n");
4750                 ret = FAILED;
4751                 goto out_unlock;
4752         }
4753
4754         /* Guard against IO completion being called at same time */
4755         spin_lock(&lpfc_cmd->buf_lock);
4756
4757         if (!lpfc_cmd->pCmd) {
4758                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4759                          "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4760                          "x%x ID %d LUN %llu\n",
4761                          SUCCESS, cmnd->device->id, cmnd->device->lun);
4762                 goto out_unlock_buf;
4763         }
4764
4765         iocb = &lpfc_cmd->cur_iocbq;
4766         if (phba->sli_rev == LPFC_SLI_REV4) {
4767                 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
4768                 if (!pring_s4) {
4769                         ret = FAILED;
4770                         goto out_unlock_buf;
4771                 }
4772                 spin_lock(&pring_s4->ring_lock);
4773         }
4774         /* the command is in process of being cancelled */
4775         if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4776                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4777                         "3169 SCSI Layer abort requested I/O has been "
4778                         "cancelled by LLD.\n");
4779                 ret = FAILED;
4780                 goto out_unlock_ring;
4781         }
4782         /*
4783          * If pCmd field of the corresponding lpfc_io_buf structure
4784          * points to a different SCSI command, then the driver has
4785          * already completed this command, but the midlayer did not
4786          * see the completion before the eh fired. Just return SUCCESS.
4787          */
4788         if (lpfc_cmd->pCmd != cmnd) {
4789                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4790                         "3170 SCSI Layer abort requested I/O has been "
4791                         "completed by LLD.\n");
4792                 goto out_unlock_ring;
4793         }
4794
4795         BUG_ON(iocb->context1 != lpfc_cmd);
4796
4797         /* abort issued in recovery is still in progress */
4798         if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4799                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4800                          "3389 SCSI Layer I/O Abort Request is pending\n");
4801                 if (phba->sli_rev == LPFC_SLI_REV4)
4802                         spin_unlock(&pring_s4->ring_lock);
4803                 spin_unlock(&lpfc_cmd->buf_lock);
4804                 spin_unlock_irqrestore(&phba->hbalock, flags);
4805                 goto wait_for_cmpl;
4806         }
4807
4808         abtsiocb = __lpfc_sli_get_iocbq(phba);
4809         if (abtsiocb == NULL) {
4810                 ret = FAILED;
4811                 goto out_unlock_ring;
4812         }
4813
4814         /* Indicate the IO is being aborted by the driver. */
4815         iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
4816
4817         /*
4818          * The scsi command cannot be in the txq, and it is in flight,
4819          * because pCmd is still pointing at the SCSI command to abort.
4820          * There is no need to search the txcmplq; just send an abort to the FW.
4821          */
4822
4823         cmd = &iocb->iocb;
4824         icmd = &abtsiocb->iocb;
4825         icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4826         icmd->un.acxri.abortContextTag = cmd->ulpContext;
4827         if (phba->sli_rev == LPFC_SLI_REV4)
4828                 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4829         else
4830                 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4831
4832         icmd->ulpLe = 1;
4833         icmd->ulpClass = cmd->ulpClass;
4834
4835         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4836         abtsiocb->hba_wqidx = iocb->hba_wqidx;
4837         abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4838         if (iocb->iocb_flag & LPFC_IO_FOF)
4839                 abtsiocb->iocb_flag |= LPFC_IO_FOF;
4840
4841         if (lpfc_is_link_up(phba))
4842                 icmd->ulpCommand = CMD_ABORT_XRI_CN;
4843         else
4844                 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4845
4846         abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4847         abtsiocb->vport = vport;
4848         lpfc_cmd->waitq = &waitq;
4849         if (phba->sli_rev == LPFC_SLI_REV4) {
4850                 /* Note: both hbalock and ring_lock must be held here */
4851                 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4852                                                 abtsiocb, 0);
4853                 spin_unlock(&pring_s4->ring_lock);
4854         } else {
4855                 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4856                                                 abtsiocb, 0);
4857         }
4858
4859         if (ret_val == IOCB_ERROR) {
4860                 /* Indicate the IO is not being aborted by the driver. */
4861                 iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
4862                 lpfc_cmd->waitq = NULL;
4863                 spin_unlock(&lpfc_cmd->buf_lock);
4864                 spin_unlock_irqrestore(&phba->hbalock, flags);
4865                 lpfc_sli_release_iocbq(phba, abtsiocb);
4866                 ret = FAILED;
4867                 goto out;
4868         }
4869
4870         /* no longer need the lock after this point */
4871         spin_unlock(&lpfc_cmd->buf_lock);
4872         spin_unlock_irqrestore(&phba->hbalock, flags);
4873
4874         if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4875                 lpfc_sli_handle_fast_ring_event(phba,
4876                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4877
4878 wait_for_cmpl:
4879         /* Wait for abort to complete */
4880         wait_event_timeout(waitq,
4881                           (lpfc_cmd->pCmd != cmnd),
4882                            msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4883
4884         spin_lock(&lpfc_cmd->buf_lock);
4885
4886         if (lpfc_cmd->pCmd == cmnd) {
4887                 ret = FAILED;
4888                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4889                                  "0748 abort handler timed out waiting "
4890                                  "for aborting I/O (xri:x%x) to complete: "
4891                                  "ret %#x, ID %d, LUN %llu\n",
4892                                  iocb->sli4_xritag, ret,
4893                                  cmnd->device->id, cmnd->device->lun);
4894         }
4895
4896         lpfc_cmd->waitq = NULL;
4897
4898         spin_unlock(&lpfc_cmd->buf_lock);
4899         goto out;
4900
4901 out_unlock_ring:
4902         if (phba->sli_rev == LPFC_SLI_REV4)
4903                 spin_unlock(&pring_s4->ring_lock);
4904 out_unlock_buf:
4905         spin_unlock(&lpfc_cmd->buf_lock);
4906 out_unlock:
4907         spin_unlock_irqrestore(&phba->hbalock, flags);
4908 out:
4909         lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4910                          "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4911                          "LUN %llu\n", ret, cmnd->device->id,
4912                          cmnd->device->lun);
4913         return ret;
4914 }
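/*
 * Editor's note (illustrative, not driver code): the abort path above
 * relies on the FCP completion handler clearing lpfc_cmd->pCmd and
 * waking lpfc_cmd->waitq.  The wait is bounded by twice devloss_tmo,
 * so a hypothetical cfg_devloss_tmo of 30 gives a ceiling of
 *
 *     msecs_to_jiffies(2 * 30 * 1000), i.e. 60 seconds,
 *
 * before message 0748 reports the timeout and the handler fails.
 */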
4915
4916 static char *
4917 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4918 {
4919         switch (task_mgmt_cmd) {
4920         case FCP_ABORT_TASK_SET:
4921                 return "ABORT_TASK_SET";
4922         case FCP_CLEAR_TASK_SET:
4923                 return "FCP_CLEAR_TASK_SET";
4924         case FCP_BUS_RESET:
4925                 return "FCP_BUS_RESET";
4926         case FCP_LUN_RESET:
4927                 return "FCP_LUN_RESET";
4928         case FCP_TARGET_RESET:
4929                 return "FCP_TARGET_RESET";
4930         case FCP_CLEAR_ACA:
4931                 return "FCP_CLEAR_ACA";
4932         case FCP_TERMINATE_TASK:
4933                 return "FCP_TERMINATE_TASK";
4934         default:
4935                 return "unknown";
4936         }
4937 }
4938
4939
4940 /**
4941  * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4942  * @vport: The virtual port for which this call is being executed.
4943  * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4944  *
4945  * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded.
4946  *
4947  * Return code :
4948  *   0x2003 - Error
4949  *   0x2002 - Success
4950  **/
4951 static int
4952 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
4953 {
4954         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4955         uint32_t rsp_info;
4956         uint32_t rsp_len;
4957         uint8_t  rsp_info_code;
4958         int ret = FAILED;
4959
4960
4961         if (fcprsp == NULL)
4962                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4963                                  "0703 fcp_rsp is missing\n");
4964         else {
4965                 rsp_info = fcprsp->rspStatus2;
4966                 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4967                 rsp_info_code = fcprsp->rspInfo3;
4968
4969
4970                 lpfc_printf_vlog(vport, KERN_INFO,
4971                                  LOG_FCP,
4972                                  "0706 fcp_rsp valid 0x%x,"
4973                                  " rsp len=%d code 0x%x\n",
4974                                  rsp_info,
4975                                  rsp_len, rsp_info_code);
4976
4977                 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
4978                  * field specifies the number of valid bytes of FCP_RSP_INFO.
4979                  * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
4980                  */
4981                 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
4982                     ((rsp_len == 8) || (rsp_len == 4))) {
4983                         switch (rsp_info_code) {
4984                         case RSP_NO_FAILURE:
4985                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4986                                                  "0715 Task Mgmt No Failure\n");
4987                                 ret = SUCCESS;
4988                                 break;
4989                         case RSP_TM_NOT_SUPPORTED: /* TM rejected */
4990                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4991                                                  "0716 Task Mgmt Target "
4992                                                 "reject\n");
4993                                 break;
4994                         case RSP_TM_NOT_COMPLETED: /* TM failed */
4995                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4996                                                  "0717 Task Mgmt Target "
4997                                                 "failed TM\n");
4998                                 break;
4999                         case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5000                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5001                                                  "0718 Task Mgmt to invalid "
5002                                                 "LUN\n");
5003                                 break;
5004                         }
5005                 }
5006         }
5007         return ret;
5008 }
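/*
 * Editor's note (illustrative sketch, not driver code): a target that
 * rejects a task management function would return an FCP_RSP shaped
 * roughly as
 *
 *     fcprsp->rspStatus2 = RSP_LEN_VALID;
 *     fcprsp->rspRspLen  = cpu_to_be32(8);
 *     fcprsp->rspInfo3   = RSP_TM_NOT_SUPPORTED;
 *
 * which the routine above logs as message 0716 and maps to FAILED;
 * only RSP_NO_FAILURE yields SUCCESS.
 */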
5009
5010
5011 /**
5012  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5013  * @vport: The virtual port for which this call is being executed.
5014  * @cmnd: Pointer to scsi_cmnd data structure.
5015  * @tgt_id: Target ID of remote device.
5016  * @lun_id: Lun number for the TMF
5017  * @task_mgmt_cmd: type of TMF to send
5018  *
5019  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5020  * a remote port.
5021  *
5022  * Return Code:
5023  *   0x2003 - Error
5024  *   0x2002 - Success.
5025  **/
5026 static int
5027 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
5028                    unsigned int tgt_id, uint64_t lun_id,
5029                    uint8_t task_mgmt_cmd)
5030 {
5031         struct lpfc_hba   *phba = vport->phba;
5032         struct lpfc_io_buf *lpfc_cmd;
5033         struct lpfc_iocbq *iocbq;
5034         struct lpfc_iocbq *iocbqrsp;
5035         struct lpfc_rport_data *rdata;
5036         struct lpfc_nodelist *pnode;
5037         int ret;
5038         int status;
5039
5040         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5041         if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
5042                 return FAILED;
5043         pnode = rdata->pnode;
5044
5045         lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
5046         if (lpfc_cmd == NULL)
5047                 return FAILED;
5048         lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5049         lpfc_cmd->rdata = rdata;
5050         lpfc_cmd->pCmd = cmnd;
5051         lpfc_cmd->ndlp = pnode;
5052
5053         status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5054                                            task_mgmt_cmd);
5055         if (!status) {
5056                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5057                 return FAILED;
5058         }
5059
5060         iocbq = &lpfc_cmd->cur_iocbq;
5061         iocbqrsp = lpfc_sli_get_iocbq(phba);
5062         if (iocbqrsp == NULL) {
5063                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5064                 return FAILED;
5065         }
5066         iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5067
5068         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5069                          "0702 Issue %s to TGT %d LUN %llu "
5070                          "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5071                          lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5072                          pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5073                          iocbq->iocb_flag);
5074
5075         status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5076                                           iocbq, iocbqrsp, lpfc_cmd->timeout);
5077         if ((status != IOCB_SUCCESS) ||
5078             (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5079                 if (status != IOCB_SUCCESS ||
5080                     iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
5081                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5082                                          "0727 TMF %s to TGT %d LUN %llu "
5083                                          "failed (%d, %d) iocb_flag x%x\n",
5084                                          lpfc_taskmgmt_name(task_mgmt_cmd),
5085                                          tgt_id, lun_id,
5086                                          iocbqrsp->iocb.ulpStatus,
5087                                          iocbqrsp->iocb.un.ulpWord[4],
5088                                          iocbq->iocb_flag);
5089                 /* if status is IOCB_SUCCESS, we are here because ulpStatus != IOSTAT_SUCCESS */
5090                 if (status == IOCB_SUCCESS) {
5091                         if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
5092                                 /* Something in the FCP_RSP was invalid.
5093                                  * Check conditions */
5094                                 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5095                         else
5096                                 ret = FAILED;
5097                 } else if (status == IOCB_TIMEDOUT) {
5098                         ret = TIMEOUT_ERROR;
5099                 } else {
5100                         ret = FAILED;
5101                 }
5102         } else
5103                 ret = SUCCESS;
5104
5105         lpfc_sli_release_iocbq(phba, iocbqrsp);
5106
5107         if (ret != TIMEOUT_ERROR)
5108                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5109
5110         return ret;
5111 }
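/*
 * Editor's note (illustrative sketch): callers in this file pair this
 * routine with lpfc_reset_flush_io_context(); e.g. a LUN reset is
 * issued as
 *
 *     status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
 *                                 FCP_LUN_RESET);
 *     if (status == SUCCESS)
 *             status = lpfc_reset_flush_io_context(vport, tgt_id,
 *                                                  lun_id, LPFC_CTX_LUN);
 *
 * On IOCB_TIMEDOUT the lpfc_io_buf is deliberately not released above;
 * the deferred completion (lpfc_tskmgmt_def_cmpl) is presumably left
 * to clean it up when the late response finally arrives.
 */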
5112
5113 /**
5114  * lpfc_chk_tgt_mapped - Wait until the scsi target (rport) is mapped
5115  * @vport: The virtual port to check on
5116  * @cmnd: Pointer to scsi_cmnd data structure.
5117  *
5118  * This routine delays until the scsi target (aka rport) for the
5119  * command exists (is present and logged in) or we declare it non-existent.
5120  *
5121  * Return code :
5122  *  0x2003 - Error
5123  *  0x2002 - Success
5124  **/
5125 static int
5126 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5127 {
5128         struct lpfc_rport_data *rdata;
5129         struct lpfc_nodelist *pnode;
5130         unsigned long later;
5131
5132         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5133         if (!rdata) {
5134                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5135                         "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5136                 return FAILED;
5137         }
5138         pnode = rdata->pnode;
5139         /*
5140          * If target is not in a MAPPED state, delay until
5141          * target is rediscovered or devloss timeout expires.
5142          */
5143         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5144         while (time_after(later, jiffies)) {
5145                 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5146                         return FAILED;
5147                 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5148                         return SUCCESS;
5149                 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5150                 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5151                 if (!rdata)
5152                         return FAILED;
5153                 pnode = rdata->pnode;
5154         }
5155         if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5156             (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5157                 return FAILED;
5158         return SUCCESS;
5159 }
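/*
 * Editor's note: the wait above is a plain 500 ms poll bounded by
 * twice devloss_tmo; with a hypothetical cfg_devloss_tmo of 30 that
 * is at most ~120 polls over 60 seconds before the target is declared
 * non-existent.
 */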
5160
5161 /**
5162  * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset
5163  * @vport: The virtual port (scsi_host) for the flush context
5164  * @tgt_id: If aborting by Target context - specifies the target id
5165  * @lun_id: If aborting by Lun context - specifies the lun id
5166  * @context: specifies the context level to flush at.
5167  *
5168  * After a reset condition via TMF, we need to flush orphaned i/o
5169  * contexts from the adapter. This routine aborts any contexts
5170  * outstanding, then waits for their completions. The wait is
5171  * bounded by devloss_tmo though.
5172  * outstanding, then waits for their completions. The wait is,
5173  * however, bounded by devloss_tmo.
5174  *  0x2003 - Error
5175  *  0x2002 - Success
5176  **/
5177 static int
5178 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5179                         uint64_t lun_id, lpfc_ctx_cmd context)
5180 {
5181         struct lpfc_hba   *phba = vport->phba;
5182         unsigned long later;
5183         int cnt;
5184
5185         cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5186         if (cnt)
5187                 lpfc_sli_abort_taskmgmt(vport,
5188                                         &phba->sli.sli3_ring[LPFC_FCP_RING],
5189                                         tgt_id, lun_id, context);
5190         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5191         while (time_after(later, jiffies) && cnt) {
5192                 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5193                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5194         }
5195         if (cnt) {
5196                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5197                         "0724 I/O flush failure for context %s : cnt x%x\n",
5198                         ((context == LPFC_CTX_LUN) ? "LUN" :
5199                          ((context == LPFC_CTX_TGT) ? "TGT" :
5200                           ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5201                         cnt);
5202                 return FAILED;
5203         }
5204         return SUCCESS;
5205 }
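/*
 * Editor's note (illustrative): the flush re-counts outstanding i/o
 * every 20 ms for up to twice devloss_tmo, e.g.
 *
 *     cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *
 * after a LUN reset; a non-zero residual count at the end of the
 * window logs message 0724 and fails the reset.
 */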
5206
5207 /**
5208  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5209  * @cmnd: Pointer to scsi_cmnd data structure.
5210  *
5211  * This routine does a device reset by sending a LUN_RESET task management
5212  * command.
5213  *
5214  * Return code :
5215  *  0x2003 - Error
5216  *  0x2002 - Success
5217  **/
5218 static int
5219 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5220 {
5221         struct Scsi_Host  *shost = cmnd->device->host;
5222         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5223         struct lpfc_rport_data *rdata;
5224         struct lpfc_nodelist *pnode;
5225         unsigned tgt_id = cmnd->device->id;
5226         uint64_t lun_id = cmnd->device->lun;
5227         struct lpfc_scsi_event_header scsi_event;
5228         int status;
5229
5230         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5231         if (!rdata || !rdata->pnode) {
5232                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5233                                  "0798 Device Reset rdata failure: rdata x%px\n",
5234                                  rdata);
5235                 return FAILED;
5236         }
5237         pnode = rdata->pnode;
5238         status = fc_block_scsi_eh(cmnd);
5239         if (status != 0 && status != SUCCESS)
5240                 return status;
5241
5242         status = lpfc_chk_tgt_mapped(vport, cmnd);
5243         if (status == FAILED) {
5244                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5245                         "0721 Device Reset rport failure: rdata x%px\n", rdata);
5246                 return FAILED;
5247         }
5248
5249         scsi_event.event_type = FC_REG_SCSI_EVENT;
5250         scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5251         scsi_event.lun = lun_id;
5252         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5253         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5254
5255         fc_host_post_vendor_event(shost, fc_get_event_number(),
5256                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5257
5258         status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5259                                                 FCP_LUN_RESET);
5260
5261         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5262                          "0713 SCSI layer issued Device Reset (%d, %llu) "
5263                          "return x%x\n", tgt_id, lun_id, status);
5264
5265         /*
5266          * We have to clean up i/o as: they may be orphaned by the TMF;
5267          * or if the TMF failed, they may be in an indeterminate state.
5268          * So, continue on.
5269          * We will report success if all the i/o aborts successfully.
5270          */
5271         if (status == SUCCESS)
5272                 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5273                                                 LPFC_CTX_LUN);
5274
5275         return status;
5276 }
5277
5278 /**
5279  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5280  * @cmnd: Pointer to scsi_cmnd data structure.
5281  *
5282  * This routine does a target reset by sending a TARGET_RESET task management
5283  * command.
5284  *
5285  * Return code :
5286  *  0x2003 - Error
5287  *  0x2002 - Success
5288  **/
5289 static int
5290 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5291 {
5292         struct Scsi_Host  *shost = cmnd->device->host;
5293         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5294         struct lpfc_rport_data *rdata;
5295         struct lpfc_nodelist *pnode;
5296         unsigned tgt_id = cmnd->device->id;
5297         uint64_t lun_id = cmnd->device->lun;
5298         struct lpfc_scsi_event_header scsi_event;
5299         int status;
5300
5301         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5302         if (!rdata || !rdata->pnode) {
5303                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5304                                  "0799 Target Reset rdata failure: rdata x%px\n",
5305                                  rdata);
5306                 return FAILED;
5307         }
5308         pnode = rdata->pnode;
5309         status = fc_block_scsi_eh(cmnd);
5310         if (status != 0 && status != SUCCESS)
5311                 return status;
5312
5313         status = lpfc_chk_tgt_mapped(vport, cmnd);
5314         if (status == FAILED) {
5315                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5316                         "0722 Target Reset rport failure: rdata x%px\n", rdata);
5317                 if (pnode) {
5318                         spin_lock_irq(shost->host_lock);
5319                         pnode->nlp_flag &= ~NLP_NPR_ADISC;
5320                         pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5321                         spin_unlock_irq(shost->host_lock);
5322                 }
5323                 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5324                                           LPFC_CTX_TGT);
5325                 return FAST_IO_FAIL;
5326         }
5327
5328         scsi_event.event_type = FC_REG_SCSI_EVENT;
5329         scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5330         scsi_event.lun = 0;
5331         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5332         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5333
5334         fc_host_post_vendor_event(shost, fc_get_event_number(),
5335                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5336
5337         status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5338                                         FCP_TARGET_RESET);
5339
5340         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5341                          "0723 SCSI layer issued Target Reset (%d, %llu) "
5342                          "return x%x\n", tgt_id, lun_id, status);
5343
5344         /*
5345          * We have to clean up i/o as: they may be orphaned by the TMF;
5346          * or if the TMF failed, they may be in an indeterminate state.
5347          * So, continue on.
5348          * We will report success if all the i/o aborts successfully.
5349          */
5350         if (status == SUCCESS)
5351                 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5352                                           LPFC_CTX_TGT);
5353         return status;
5354 }
5355
5356 /**
5357  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5358  * @cmnd: Pointer to scsi_cmnd data structure.
5359  *
5360  * This routine does a target reset to all targets on @cmnd->device->host.
5361  * This emulates Parallel SCSI Bus Reset Semantics.
5362  *
5363  * Return code :
5364  *  0x2003 - Error
5365  *  0x2002 - Success
5366  **/
5367 static int
5368 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5369 {
5370         struct Scsi_Host  *shost = cmnd->device->host;
5371         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5372         struct lpfc_nodelist *ndlp = NULL;
5373         struct lpfc_scsi_event_header scsi_event;
5374         int match;
5375         int ret = SUCCESS, status, i;
5376
5377         scsi_event.event_type = FC_REG_SCSI_EVENT;
5378         scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5379         scsi_event.lun = 0;
5380         memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5381         memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5382
5383         fc_host_post_vendor_event(shost, fc_get_event_number(),
5384                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5385
5386         status = fc_block_scsi_eh(cmnd);
5387         if (status != 0 && status != SUCCESS)
5388                 return status;
5389
5390         /*
5391          * Since the driver manages a single bus device, reset all
5392          * targets known to the driver.  Should any target reset
5393          * fail, this routine returns failure to the midlayer.
5394          */
5395         for (i = 0; i < LPFC_MAX_TARGET; i++) {
5396                 /* Search for mapped node by target ID */
5397                 match = 0;
5398                 spin_lock_irq(shost->host_lock);
5399                 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5400                         if (!NLP_CHK_NODE_ACT(ndlp))
5401                                 continue;
5402                         if (vport->phba->cfg_fcp2_no_tgt_reset &&
5403                             (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5404                                 continue;
5405                         if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5406                             ndlp->nlp_sid == i &&
5407                             ndlp->rport &&
5408                             ndlp->nlp_type & NLP_FCP_TARGET) {
5409                                 match = 1;
5410                                 break;
5411                         }
5412                 }
5413                 spin_unlock_irq(shost->host_lock);
5414                 if (!match)
5415                         continue;
5416
5417                 status = lpfc_send_taskmgmt(vport, cmnd,
5418                                         i, 0, FCP_TARGET_RESET);
5419
5420                 if (status != SUCCESS) {
5421                         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5422                                          "0700 Bus Reset on target %d failed\n",
5423                                          i);
5424                         ret = FAILED;
5425                 }
5426         }
5427         /*
5428          * We have to clean up i/o as: they may be orphaned by the TMFs
5429          * above; or if any of the TMFs failed, they may be in an
5430          * indeterminate state.
5431          * We will report success if all the i/o aborts successfully.
5432          */
5433
5434         status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5435         if (status != SUCCESS)
5436                 ret = FAILED;
5437
5438         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5439                          "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5440         return ret;
5441 }
5442
5443 /**
5444  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5445  * @cmnd: Pointer to scsi_cmnd data structure.
5446  *
5447  * This routine does a host reset of the adapter port. It brings the HBA
5448  * offline, performs a board restart, and then brings the board back online.
5449  * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
5450  * all outstanding SCSI commands on the host, returning errors back to the
5451  * SCSI mid-level. As this is the SCSI mid-level's last resort of error
5452  * handling, this routine only returns error if resetting the adapter is
5453  * unsuccessful; in all other cases it returns success.
5454  *
5455  * Return code :
5456  *  0x2003 - Error
5457  *  0x2002 - Success
5458  **/
5459 static int
5460 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5461 {
5462         struct Scsi_Host *shost = cmnd->device->host;
5463         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5464         struct lpfc_hba *phba = vport->phba;
5465         int rc, ret = SUCCESS;
5466
5467         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5468                          "3172 SCSI layer issued Host Reset Data:\n");
5469
5470         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5471         lpfc_offline(phba);
5472         rc = lpfc_sli_brdrestart(phba);
5473         if (rc)
5474                 goto error;
5475
5476         rc = lpfc_online(phba);
5477         if (rc)
5478                 goto error;
5479
5480         lpfc_unblock_mgmt_io(phba);
5481
5482         return ret;
5483 error:
5484         lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5485                          "3323 Failed host reset\n");
5486         lpfc_unblock_mgmt_io(phba);
5487         return FAILED;
5488 }
5489
5490 /**
5491  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5492  * @sdev: Pointer to scsi_device.
5493  *
5494  * This routine populates lun_queue_depth + 2 scsi_bufs into this host's
5495  * globally available list of scsi buffers. It also makes sure no more scsi
5496  * buffers are allocated than the HBA limit conveyed to the midlayer. This
5497  * list of scsi buffers exists for the lifetime of the driver.
5498  *
5499  * Return codes:
5500  *   non-0 - Error
5501  *   0 - Success
5502  **/
5503 static int
5504 lpfc_slave_alloc(struct scsi_device *sdev)
5505 {
5506         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5507         struct lpfc_hba   *phba = vport->phba;
5508         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5509         uint32_t total = 0;
5510         uint32_t num_to_alloc = 0;
5511         int num_allocated = 0;
5512         uint32_t sdev_cnt;
5513         struct lpfc_device_data *device_data;
5514         unsigned long flags;
5515         struct lpfc_name target_wwpn;
5516
5517         if (!rport || fc_remote_port_chkready(rport))
5518                 return -ENXIO;
5519
5520         if (phba->cfg_fof) {
5521
5522                 /*
5523                  * Check to see if the device data structure for the lun
5524                  * exists.  If not, create one.
5525                  */
5526
5527                 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5528                 spin_lock_irqsave(&phba->devicelock, flags);
5529                 device_data = __lpfc_get_device_data(phba,
5530                                                      &phba->luns,
5531                                                      &vport->fc_portname,
5532                                                      &target_wwpn,
5533                                                      sdev->lun);
5534                 if (!device_data) {
5535                         spin_unlock_irqrestore(&phba->devicelock, flags);
5536                         device_data = lpfc_create_device_data(phba,
5537                                                         &vport->fc_portname,
5538                                                         &target_wwpn,
5539                                                         sdev->lun,
5540                                                         phba->cfg_XLanePriority,
5541                                                         true);
5542                         if (!device_data)
5543                                 return -ENOMEM;
5544                         spin_lock_irqsave(&phba->devicelock, flags);
5545                         list_add_tail(&device_data->listentry, &phba->luns);
5546                 }
5547                 device_data->rport_data = rport->dd_data;
5548                 device_data->available = true;
5549                 spin_unlock_irqrestore(&phba->devicelock, flags);
5550                 sdev->hostdata = device_data;
5551         } else {
5552                 sdev->hostdata = rport->dd_data;
5553         }
5554         sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5555
5556         /* For SLI4, all IO buffers are pre-allocated */
5557         if (phba->sli_rev == LPFC_SLI_REV4)
5558                 return 0;
5559
5560         /* This code path is now ONLY for SLI3 adapters */
5561
5562         /*
5563          * Populate lun_queue_depth + 2 scsi_bufs into this host's globally
5564          * available list of scsi buffers.  Don't allocate more than the
5565          * HBA limit conveyed to the midlayer via the host structure.  The
5566          * formula accounts for the lun_queue_depth + error handlers + 1
5567          * extra.  This list of scsi bufs exists for the lifetime of the driver.
5568          */
5569         total = phba->total_scsi_bufs;
5570         num_to_alloc = vport->cfg_lun_queue_depth + 2;
5571
5572         /* If the buffers already allocated are sufficient, do nothing */
5573         if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5574                 return 0;
5575
5576         /* Allow some exchanges to be available always to complete discovery */
5577         if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5578                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5579                                  "0704 At limitation of %d preallocated "
5580                                  "command buffers\n", total);
5581                 return 0;
5582         /* Allow some exchanges to be available always to complete discovery */
5583         } else if (total + num_to_alloc >
5584                 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5585                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5586                                  "0705 Allocation request of %d "
5587                                  "command buffers will exceed max of %d.  "
5588                                  "Reducing allocation request to %d.\n",
5589                                  num_to_alloc, phba->cfg_hba_queue_depth,
5590                                  (phba->cfg_hba_queue_depth - total));
5591                 num_to_alloc = phba->cfg_hba_queue_depth - total;
5592         }
5593         num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
5594         if (num_to_alloc != num_allocated) {
5595                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5596                                  "0708 Allocation request of %d "
5597                                  "command buffers did not succeed.  "
5598                                  "Allocated %d buffers.\n",
5599                                  num_to_alloc, num_allocated);
5600         }
5601         if (num_allocated > 0)
5602                 phba->total_scsi_bufs += num_allocated;
5603         return 0;
5604 }
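/*
 * Editor's note (worked example, values hypothetical): on an SLI-3
 * adapter with cfg_lun_queue_depth = 30, each new scsi_device requests
 * num_to_alloc = 32 buffers; the request is skipped while
 * sdev_cnt * 32 < total, and is clamped so the pool never exceeds
 * cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT.
 */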
5605
5606 /**
5607  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5608  * @sdev: Pointer to scsi_device.
5609  *
5610  * This routine configures the following items:
5611  *   - Tag command queuing support for @sdev if supported.
5612  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5613  *
5614  * Return codes:
5615  *   0 - Success
5616  **/
5617 static int
5618 lpfc_slave_configure(struct scsi_device *sdev)
5619 {
5620         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5621         struct lpfc_hba   *phba = vport->phba;
5622
5623         scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5624
5625         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5626                 lpfc_sli_handle_fast_ring_event(phba,
5627                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5628                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5629                         lpfc_poll_rearm_timer(phba);
5630         }
5631
5632         return 0;
5633 }
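/*
 * Editor's note: in polled mode (ENABLE_FCP_RING_POLLING) the driver
 * services the FCP ring directly from this path and, when interrupts
 * are also disabled (DISABLE_FCP_RING_INT), re-arms the poll timer;
 * the abort handler earlier in this file drains the ring the same way
 * via lpfc_sli_handle_fast_ring_event().
 */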
5634
5635 /**
5636  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5637  * @sdev: Pointer to scsi_device.
5638  *
5639  * This routine sets the @sdev hostdata field to null.
5640  **/
5641 static void
5642 lpfc_slave_destroy(struct scsi_device *sdev)
5643 {
5644         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5645         struct lpfc_hba   *phba = vport->phba;
5646         unsigned long flags;
5647         struct lpfc_device_data *device_data = sdev->hostdata;
5648
5649         atomic_dec(&phba->sdev_cnt);
5650         if ((phba->cfg_fof) && (device_data)) {
5651                 spin_lock_irqsave(&phba->devicelock, flags);
5652                 device_data->available = false;
5653                 if (!device_data->oas_enabled)
5654                         lpfc_delete_device_data(phba, device_data);
5655                 spin_unlock_irqrestore(&phba->devicelock, flags);
5656         }
5657         sdev->hostdata = NULL;
5658         return;
5659 }
5660
5661 /**
5662  * lpfc_create_device_data - creates and initializes device data structure for OAS
5663  * @phba: Pointer to host bus adapter structure.
5664  * @vport_wwpn: Pointer to vport's wwpn information
5665  * @target_wwpn: Pointer to target's wwpn information
5666  * @lun: Lun on target
 * @pri: Priority of the lun
5667  * @atomic_create: Flag to indicate if memory should be allocated using the
5668  *                GFP_ATOMIC flag or not.
5669  *
5670  * This routine creates a device data structure which will contain identifying
5671  * information for the device (host wwpn, target wwpn, lun), state of OAS,
5672  * whether or not the corresponding lun is available to the system,
5673  * and a pointer to the rport data.
5674  *
5675  * Return codes:
5676  *   NULL - Error
5677  *   Pointer to lpfc_device_data - Success
5678  **/
5679 struct lpfc_device_data*
5680 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5681                         struct lpfc_name *target_wwpn, uint64_t lun,
5682                         uint32_t pri, bool atomic_create)
5683 {
5684
5685         struct lpfc_device_data *lun_info;
5686         int memory_flags;
5687
5688         if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
5689             !(phba->cfg_fof))
5690                 return NULL;
5691
5692         /* Attempt to create the device data to contain lun info */
5693
5694         if (atomic_create)
5695                 memory_flags = GFP_ATOMIC;
5696         else
5697                 memory_flags = GFP_KERNEL;
5698         lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5699         if (!lun_info)
5700                 return NULL;
5701         INIT_LIST_HEAD(&lun_info->listentry);
5702         lun_info->rport_data  = NULL;
5703         memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5704                sizeof(struct lpfc_name));
5705         memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5706                sizeof(struct lpfc_name));
5707         lun_info->device_id.lun = lun;
5708         lun_info->oas_enabled = false;
5709         lun_info->priority = pri;
5710         lun_info->available = false;
5711         return lun_info;
5712 }
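/*
 * Editor's note (illustrative sketch): lpfc_slave_alloc() above shows
 * the intended call pattern - allocate outside the device lock, then
 * insert under it:
 *
 *     device_data = lpfc_create_device_data(phba, &vport->fc_portname,
 *                                           &target_wwpn, sdev->lun,
 *                                           phba->cfg_XLanePriority,
 *                                           true);
 *     if (device_data) {
 *             spin_lock_irqsave(&phba->devicelock, flags);
 *             list_add_tail(&device_data->listentry, &phba->luns);
 *             spin_unlock_irqrestore(&phba->devicelock, flags);
 *     }
 */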
5713
5714 /**
5715  * lpfc_delete_device_data - frees a device data structure for OAS
5716  * @phba: Pointer to host bus adapter structure.
5717  * @lun_info: Pointer to device data structure to free.
5718  *
5719  * This routine frees the previously allocated device data structure passed.
5720  *
5721  **/
5722 void
5723 lpfc_delete_device_data(struct lpfc_hba *phba,
5724                         struct lpfc_device_data *lun_info)
5725 {
5726
5727         if (unlikely(!phba) || !lun_info  ||
5728             !(phba->cfg_fof))
5729                 return;
5730
5731         if (!list_empty(&lun_info->listentry))
5732                 list_del(&lun_info->listentry);
5733         mempool_free(lun_info, phba->device_data_mem_pool);
5734         return;
5735 }
5736
5737 /**
5738  * __lpfc_get_device_data - returns the device data for the specified lun
5739  * @phba: Pointer to host bus adapter structure.
5740  * @list: Pointer to the list to search.
5741  * @vport_wwpn: Pointer to vport's wwpn information
5742  * @target_wwpn: Pointer to target's wwpn information
5743  * @lun: Lun on target
5744  *
5745  * This routine searches the list passed for the specified lun's device data.
5746  * This function does not hold locks, it is the responsibility of the caller
5747  * to ensure the proper lock is held before calling the function.
5748  *
5749  * Return codes:
5750  *   NULL - Error
5751  *   Pointer to lpfc_device_data - Success
5752  **/
5753 struct lpfc_device_data*
5754 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5755                        struct lpfc_name *vport_wwpn,
5756                        struct lpfc_name *target_wwpn, uint64_t lun)
5757 {
5758
5759         struct lpfc_device_data *lun_info;
5760
5761         if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5762             !phba->cfg_fof)
5763                 return NULL;
5764
5765         /* Search the list for matching vport wwpn, target wwpn and lun */
5766
5767         list_for_each_entry(lun_info, list, listentry) {
5768                 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5769                             sizeof(struct lpfc_name)) == 0) &&
5770                     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5771                             sizeof(struct lpfc_name)) == 0) &&
5772                     (lun_info->device_id.lun == lun))
5773                         return lun_info;
5774         }
5775
5776         return NULL;
5777 }
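/*
 * Editor's note: the leading underscores mark the usual lock-held
 * convention; a sketch of a correct call site is
 *
 *     spin_lock_irqsave(&phba->devicelock, flags);
 *     lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
 *                                       target_wwpn, lun);
 *     spin_unlock_irqrestore(&phba->devicelock, flags);
 */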
5778
5779 /**
5780  * lpfc_find_next_oas_lun - searches for the next oas lun
5781  * @phba: Pointer to host bus adapter structure.
5782  * @vport_wwpn: Pointer to vport's wwpn information
5783  * @target_wwpn: Pointer to target's wwpn information
5784  * @starting_lun: Pointer to the lun to start searching for
5785  * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5786  * @found_target_wwpn: Pointer to the found lun's target wwpn information
5787  * @found_lun: Pointer to the found lun.
5788  * @found_lun_status: Pointer to status of the found lun.
 * @found_lun_pri: Pointer to the priority of the found lun.
5789  *
5790  * This routine searches the luns list for the specified lun
5791  * or the first lun for the vport/target.  If the vport wwpn contains
5792  * a zero value then a specific vport is not specified. In this case
5793  * any vport which contains the lun will be considered a match.  If the
5794  * target wwpn contains a zero value then a specific target is not specified.
5795  * In this case any target which contains the lun will be considered a
5796  * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
5797  * are returned.  The function will also return the next lun if available.
5798  * If the next lun is not found, starting_lun parameter will be set to
5799  * NO_MORE_OAS_LUN.
5800  *
5801  * Return codes:
5802  *   true - A matching lun was found
5803  *   false - No matching lun was found
5804  **/
5805 bool
5806 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5807                        struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5808                        struct lpfc_name *found_vport_wwpn,
5809                        struct lpfc_name *found_target_wwpn,
5810                        uint64_t *found_lun,
5811                        uint32_t *found_lun_status,
5812                        uint32_t *found_lun_pri)
5813 {
5814
5815         unsigned long flags;
5816         struct lpfc_device_data *lun_info;
5817         struct lpfc_device_id *device_id;
5818         uint64_t lun;
5819         bool found = false;
5820
5821         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5822             !starting_lun || !found_vport_wwpn ||
5823             !found_target_wwpn || !found_lun || !found_lun_status ||
5824             (*starting_lun == NO_MORE_OAS_LUN) ||
5825             !phba->cfg_fof)
5826                 return false;
5827
5828         lun = *starting_lun;
5829         *found_lun = NO_MORE_OAS_LUN;
5830         *starting_lun = NO_MORE_OAS_LUN;
5831
5832         /* Search for the lun or the lun closest in value */
5833
5834         spin_lock_irqsave(&phba->devicelock, flags);
5835         list_for_each_entry(lun_info, &phba->luns, listentry) {
5836                 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5837                      (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5838                             sizeof(struct lpfc_name)) == 0)) &&
5839                     ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5840                      (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5841                             sizeof(struct lpfc_name)) == 0)) &&
5842                     (lun_info->oas_enabled)) {
5843                         device_id = &lun_info->device_id;
5844                         if ((!found) &&
5845                             ((lun == FIND_FIRST_OAS_LUN) ||
5846                              (device_id->lun == lun))) {
5847                                 *found_lun = device_id->lun;
5848                                 memcpy(found_vport_wwpn,
5849                                        &device_id->vport_wwpn,
5850                                        sizeof(struct lpfc_name));
5851                                 memcpy(found_target_wwpn,
5852                                        &device_id->target_wwpn,
5853                                        sizeof(struct lpfc_name));
5854                                 if (lun_info->available)
5855                                         *found_lun_status =
5856                                                 OAS_LUN_STATUS_EXISTS;
5857                                 else
5858                                         *found_lun_status = 0;
5859                                 *found_lun_pri = lun_info->priority;
5860                                 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5861                                         memset(vport_wwpn, 0x0,
5862                                                sizeof(struct lpfc_name));
5863                                 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5864                                         memset(target_wwpn, 0x0,
5865                                                sizeof(struct lpfc_name));
5866                                 found = true;
5867                         } else if (found) {
5868                                 *starting_lun = device_id->lun;
5869                                 memcpy(vport_wwpn, &device_id->vport_wwpn,
5870                                        sizeof(struct lpfc_name));
5871                                 memcpy(target_wwpn, &device_id->target_wwpn,
5872                                        sizeof(struct lpfc_name));
5873                                 break;
5874                         }
5875                 }
5876         }
5877         spin_unlock_irqrestore(&phba->devicelock, flags);
5878         return found;
5879 }
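/*
 * Editor's note (illustrative sketch, locals hypothetical): a caller
 * can enumerate every OAS-enabled lun by seeding with
 * FIND_FIRST_OAS_LUN and looping until no successor is reported:
 *
 *     uint64_t lun = FIND_FIRST_OAS_LUN;
 *
 *     while (lpfc_find_next_oas_lun(phba, &vwwpn, &twwpn, &lun,
 *                                   &found_vwwpn, &found_twwpn,
 *                                   &found_lun, &status, &pri)) {
 *             ... use found_lun here ...
 *             if (lun == NO_MORE_OAS_LUN)
 *                     break;
 *     }
 */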
5880
5881 /**
5882  * lpfc_enable_oas_lun - enables a lun for OAS operations
5883  * @phba: Pointer to host bus adapter structure.
5884  * @vport_wwpn: Pointer to vport's wwpn information
5885  * @target_wwpn: Pointer to target's wwpn information
5886  * @lun: Lun
 * @pri: Priority of the lun
5887  *
5888  * This routine enables a lun for OAS operations.  The routine does so by
5889  * doing the following:
5890  *
5891  *   1) Checks to see if the device data for the lun has been created.
5892  *   2) If found, sets the OAS enabled flag if not set and returns.
5893  *   3) Otherwise, creates a device data structure.
5894  *   4) If successfully created, indicates the device data is for an OAS lun,
5895  *   indicates the lun is not available, and adds it to the list of luns.
5896  *
5897  * Return codes:
5898  *   false - Error
5899  *   true - Success
5900  **/
5901 bool
5902 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5903                     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5904 {
5905
5906         struct lpfc_device_data *lun_info;
5907         unsigned long flags;
5908
5909         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5910             !phba->cfg_fof)
5911                 return false;
5912
5913         spin_lock_irqsave(&phba->devicelock, flags);
5914
5915         /* Check to see if the device data for the lun has been created */
5916         lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5917                                           target_wwpn, lun);
5918         if (lun_info) {
5919                 if (!lun_info->oas_enabled)
5920                         lun_info->oas_enabled = true;
5921                 lun_info->priority = pri;
5922                 spin_unlock_irqrestore(&phba->devicelock, flags);
5923                 return true;
5924         }
5925
5926         /* Create a lun info structure and add it to the list of luns */
5927         lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5928                                            pri, true);
5929         if (lun_info) {
5930                 lun_info->oas_enabled = true;
5931                 lun_info->priority = pri;
5932                 lun_info->available = false;
5933                 list_add_tail(&lun_info->listentry, &phba->luns);
5934                 spin_unlock_irqrestore(&phba->devicelock, flags);
5935                 return true;
5936         }
5937         spin_unlock_irqrestore(&phba->devicelock, flags);
5938         return false;
5939 }
5940
5941 /**
5942  * lpfc_disable_oas_lun - disables a lun for OAS operations
5943  * @phba: Pointer to host bus adapter structure.
5944  * @vport_wwpn: Pointer to vport's wwpn information
5945  * @target_wwpn: Pointer to target's wwpn information
5946  * @lun: Lun
 * @pri: Priority of the lun
5947  *
5948  * This routine disables a lun for OAS operations.  The routine does so by
5949  * doing the following:
5950  *
5951  *   1) Checks to see if the device data for the lun is created.
5952  *   2) If present, clears the flag indicating this lun is for OAS.
5953  *   3) If the lun is not available to the system, the device data is
5954  *   freed.
5955  *
5956  * Return codes:
5957  *   false - Error
5958  *   true - Success
5959  **/
5960 bool
5961 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5962                      struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5963 {
5964
5965         struct lpfc_device_data *lun_info;
5966         unsigned long flags;
5967
5968         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5969             !phba->cfg_fof)
5970                 return false;
5971
5972         spin_lock_irqsave(&phba->devicelock, flags);
5973
5974         /* Check to see if the lun is available. */
5975         lun_info = __lpfc_get_device_data(phba,
5976                                           &phba->luns, vport_wwpn,
5977                                           target_wwpn, lun);
5978         if (lun_info) {
5979                 lun_info->oas_enabled = false;
5980                 lun_info->priority = pri;
5981                 if (!lun_info->available)
5982                         lpfc_delete_device_data(phba, lun_info);
5983                 spin_unlock_irqrestore(&phba->devicelock, flags);
5984                 return true;
5985         }
5986
5987         spin_unlock_irqrestore(&phba->devicelock, flags);
5988         return false;
5989 }
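/*
 * Editor's note (illustrative, wwpns hypothetical): enable and disable
 * are symmetric around the oas_enabled flag, so
 *
 *     lpfc_enable_oas_lun(phba, &vwwpn, &twwpn, lun, pri);
 *     lpfc_disable_oas_lun(phba, &vwwpn, &twwpn, lun, pri);
 *
 * leaves no entry behind for a lun that was never reported available,
 * since the disable path frees such device data immediately.
 */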
5990
5991 static int
5992 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5993 {
5994         return SCSI_MLQUEUE_HOST_BUSY;
5995 }
5996
5997 static int
5998 lpfc_no_handler(struct scsi_cmnd *cmnd)
5999 {
6000         return FAILED;
6001 }
6002
6003 static int
6004 lpfc_no_slave(struct scsi_device *sdev)
6005 {
6006         return -ENODEV;
6007 }
6008
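/*
 * Editor's note: this template appears to be the one used when the
 * port is configured for NVMe-only operation; the SCSI entry points
 * deliberately route to the lpfc_no_*() stubs above so the midlayer
 * backs off (SCSI_MLQUEUE_HOST_BUSY, FAILED, -ENODEV) instead of
 * driving FCP traffic.
 */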
6009 struct scsi_host_template lpfc_template_nvme = {
6010         .module                 = THIS_MODULE,
6011         .name                   = LPFC_DRIVER_NAME,
6012         .proc_name              = LPFC_DRIVER_NAME,
6013         .info                   = lpfc_info,
6014         .queuecommand           = lpfc_no_command,
6015         .eh_abort_handler       = lpfc_no_handler,
6016         .eh_device_reset_handler = lpfc_no_handler,
6017         .eh_target_reset_handler = lpfc_no_handler,
6018         .eh_bus_reset_handler   = lpfc_no_handler,
6019         .eh_host_reset_handler  = lpfc_no_handler,
6020         .slave_alloc            = lpfc_no_slave,
6021         .slave_configure        = lpfc_no_slave,
6022         .scan_finished          = lpfc_scan_finished,
6023         .this_id                = -1,
6024         .sg_tablesize           = 1,
6025         .cmd_per_lun            = 1,
6026         .shost_attrs            = lpfc_hba_attrs,
6027         .max_sectors            = 0xFFFF,
6028         .vendor_id              = LPFC_NL_VENDOR_ID,
6029         .track_queue_depth      = 0,
6030 };
6031
6032 struct scsi_host_template lpfc_template = {
6033         .module                 = THIS_MODULE,
6034         .name                   = LPFC_DRIVER_NAME,
6035         .proc_name              = LPFC_DRIVER_NAME,
6036         .info                   = lpfc_info,
6037         .queuecommand           = lpfc_queuecommand,
6038         .eh_timed_out           = fc_eh_timed_out,
6039         .eh_abort_handler       = lpfc_abort_handler,
6040         .eh_device_reset_handler = lpfc_device_reset_handler,
6041         .eh_target_reset_handler = lpfc_target_reset_handler,
6042         .eh_bus_reset_handler   = lpfc_bus_reset_handler,
6043         .eh_host_reset_handler  = lpfc_host_reset_handler,
6044         .slave_alloc            = lpfc_slave_alloc,
6045         .slave_configure        = lpfc_slave_configure,
6046         .slave_destroy          = lpfc_slave_destroy,
6047         .scan_finished          = lpfc_scan_finished,
6048         .this_id                = -1,
6049         .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
6050         .cmd_per_lun            = LPFC_CMD_PER_LUN,
6051         .shost_attrs            = lpfc_hba_attrs,
6052         .max_sectors            = 0xFFFF,
6053         .vendor_id              = LPFC_NL_VENDOR_ID,
6054         .change_queue_depth     = scsi_change_queue_depth,
6055         .track_queue_depth      = 1,
6056 };