GNU Linux-libre 4.19.245-gnu1
[releases.git] / drivers / scsi / lpfc / lpfc_scsi.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5  * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.    *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <net/checksum.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_eh.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_tcq.h>
38 #include <scsi/scsi_transport_fc.h>
39
40 #include "lpfc_version.h"
41 #include "lpfc_hw4.h"
42 #include "lpfc_hw.h"
43 #include "lpfc_sli.h"
44 #include "lpfc_sli4.h"
45 #include "lpfc_nl.h"
46 #include "lpfc_disc.h"
47 #include "lpfc.h"
48 #include "lpfc_scsi.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_crtn.h"
51 #include "lpfc_vport.h"
52
/* Wait intervals in seconds; presumably used by the reset/abort polling
 * loops later in this file - confirm against the handlers.
 */
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/* Set once the BlockGuard debug buffers have been dumped.
 * NOTE(review): only the definition is visible here; confirm the
 * producer/consumer of this flag elsewhere in the driver.
 */
int _dump_buf_done = 1;

/* Printable names for SCSI protection operations; order presumably
 * mirrors the SCSI_PROT_* opcode values - verify against scsi.h.
 */
static char *dif_op_str[] = {
        "PROT_NORMAL",
        "PROT_READ_INSERT",
        "PROT_WRITE_STRIP",
        "PROT_READ_STRIP",
        "PROT_WRITE_INSERT",
        "PROT_READ_PASS",
        "PROT_WRITE_PASS",
};

/* Wire format of an 8-byte T10 DIF tuple (big-endian fields) */
struct scsi_dif_tuple {
        __be16 guard_tag;       /* Checksum */
        __be16 app_tag;         /* Opaque storage */
        __be32 ref_tag;         /* Target LBA or indirect LBA */
};
73
74 static struct lpfc_rport_data *
75 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
76 {
77         struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
78
79         if (vport->phba->cfg_fof)
80                 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
81         else
82                 return (struct lpfc_rport_data *)sdev->hostdata;
83 }
84
85 static void
86 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
87 static void
88 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
89 static int
90 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
91
92 static void
93 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
94 {
95         void *src, *dst;
96         struct scatterlist *sgde = scsi_sglist(cmnd);
97
98         if (!_dump_buf_data) {
99                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
100                         "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
101                                 __func__);
102                 return;
103         }
104
105
106         if (!sgde) {
107                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
108                         "9051 BLKGRD: ERROR: data scatterlist is null\n");
109                 return;
110         }
111
112         dst = (void *) _dump_buf_data;
113         while (sgde) {
114                 src = sg_virt(sgde);
115                 memcpy(dst, src, sgde->length);
116                 dst += sgde->length;
117                 sgde = sg_next(sgde);
118         }
119 }
120
121 static void
122 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
123 {
124         void *src, *dst;
125         struct scatterlist *sgde = scsi_prot_sglist(cmnd);
126
127         if (!_dump_buf_dif) {
128                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
129                         "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
130                                 __func__);
131                 return;
132         }
133
134         if (!sgde) {
135                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
136                         "9053 BLKGRD: ERROR: prot scatterlist is null\n");
137                 return;
138         }
139
140         dst = _dump_buf_dif;
141         while (sgde) {
142                 src = sg_virt(sgde);
143                 memcpy(dst, src, sgde->length);
144                 dst += sgde->length;
145                 sgde = sg_next(sgde);
146         }
147 }
148
/**
 * lpfc_cmd_blksize - Get the logical block size of a command's device
 * @sc: SCSI command in question.
 *
 * Return: the device's sector size in bytes.
 **/
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
154
/* Flags naming which protection check a caller is asking about */
#define LPFC_CHECK_PROTECT_GUARD        1
#define LPFC_CHECK_PROTECT_REF          2
/**
 * lpfc_cmd_protect - Report whether a protection check applies to @sc
 * @sc: SCSI command in question.
 * @flag: LPFC_CHECK_PROTECT_GUARD or LPFC_CHECK_PROTECT_REF.
 *
 * Currently hard-wired to 1: both @sc and @flag are ignored and every
 * command is treated as requiring the check.
 **/
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
162
163 static inline unsigned
164 lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
165 {
166         if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
167                 return 0;
168         if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
169                 return 1;
170         return 0;
171 }
172
173 /**
174  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
175  * @phba: Pointer to HBA object.
176  * @lpfc_cmd: lpfc scsi command object pointer.
177  *
178  * This function is called from the lpfc_prep_task_mgmt_cmd function to
179  * set the last bit in the response sge entry.
180  **/
181 static void
182 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
183                                 struct lpfc_scsi_buf *lpfc_cmd)
184 {
185         struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
186         if (sgl) {
187                 sgl += 1;
188                 sgl->word2 = le32_to_cpu(sgl->word2);
189                 bf_set(lpfc_sli4_sge_last, sgl, 1);
190                 sgl->word2 = cpu_to_le32(sgl->word2);
191         }
192 }
193
/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 * Successful completions are binned into the per-node latency histogram
 * (pnode->lat_data) using either linear or power-of-two bucket widths.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	/* Only successful completions are counted. */
	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	/* host_lock guards the stat-collection flags and the buckets. */
	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		/* Evenly-sized buckets starting at bucket_base. */
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		/* Exponential buckets: boundary of bucket i is
		 * bucket_base + (1 << i) * bucket_step. A latency past the
		 * last boundary falls into the final bucket.
		 */
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
247
/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process WORKER_RAM_DOWN_EVENT event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	/* Rate limit: bail out if a ramp-down was posted less than
	 * QUEUE_RAMP_DOWN_INTERVAL jiffies ago.
	 */
	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Post WORKER_RAMP_DOWN_QUEUE unless it is already pending;
	 * work_port_lock guards the work_port_events bitmask.
	 */
	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	/* Only the caller that actually set the bit wakes the worker. */
	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
290
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for
 * the worker thread. It reduces the queue depth of every scsi device on
 * each vport associated with @phba, in proportion to the recent
 * resource-error rate.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Shrink the depth by the fraction of recent
				 * commands that hit resource errors; always
				 * drop by at least one.
				 */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Counters consumed: reset for the next ramp-down window. */
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
340
341 /**
342  * lpfc_scsi_dev_block - set all scsi hosts to block state
343  * @phba: Pointer to HBA context object.
344  *
345  * This function walks vport list and set each SCSI host to block state
346  * by invoking fc_remote_port_delete() routine. This function is invoked
347  * with EEH when device's PCI slot has been permanently disabled.
348  **/
349 void
350 lpfc_scsi_dev_block(struct lpfc_hba *phba)
351 {
352         struct lpfc_vport **vports;
353         struct Scsi_Host  *shost;
354         struct scsi_device *sdev;
355         struct fc_rport *rport;
356         int i;
357
358         vports = lpfc_create_vport_work_array(phba);
359         if (vports != NULL)
360                 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
361                         shost = lpfc_shost_from_vport(vports[i]);
362                         shost_for_each_device(sdev, shost) {
363                                 rport = starget_to_rport(scsi_target(sdev));
364                                 fc_remote_port_delete(rport);
365                         }
366                 }
367         lpfc_destroy_vport_work_array(phba, vports);
368 }
369
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	/* Room left for the sg BPL once the FCP CMND and FCP RSP have been
	 * carved off the front of the DMA buffer.
	 */
	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}


		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			/* No iotag left: unwind this buffer and stop. */
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		/* Carve the DMA buffer: FCP CMND, then FCP RSP, then BPL. */
		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			/* SLI-2, or SLI-3 with BlockGuard: point the IOCB at
			 * the two-entry BPL built above.
			 */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1  = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
513
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal. It clears the rport reference of every aborted
 * SCSI buffer whose node belongs to @vport.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	/* Nothing to do unless FCP is enabled on this adapter. */
	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	/* Lock order: hbalock, then abts_scsi_buf_list_lock. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		/* Sever the link so the completion path cannot touch a
		 * node that is going away with the vport.
		 */
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
541
/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri. It first searches the aborted-buffer list for the xri;
 * if not found there, it falls back to scanning every active iotag.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	/* Lock order: hbalock, then abts_scsi_buf_list_lock. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	/* Phase 1: an aborted xri is normally on the abts buffer list. */
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			/* Drop hbalock before calling back into the SLI
			 * layer / releasing the buffer.
			 */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	/* Phase 2: fall back to scanning the active iotag table. */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		/* Skip non-FCP and driver-internal (libdfc) iocbs. */
		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Kick the worker if the ELS txq has pending work. */
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
613
/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 * @sb_count: number of buffers on @post_sblist; must be > 0.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contains contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
 * embedded SGL post mailbox command for posting. The @post_sblist passed in
 * must be local list, thus no lock is needed when manipulate the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);	/* current run of contiguous xris */
	LIST_HEAD(blck_sblist);	/* block being posted this iteration */
	LIST_HEAD(scsi_sblist);	/* everything processed, pass or fail */

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	/* sgl area = DMA buffer minus the FCP CMND and FCP RSP up front */
	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri:
				 * post it by itself with the embedded command
				 */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}
742
/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/*
	 * Drain both the get and put lists onto the local list. The put
	 * lock nests inside the get lock, the same order used by the
	 * buffer get paths (lpfc_get_scsi_buf_s3/_s4).
	 */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}
779
780 /**
781  * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
782  * @vport: The virtual port for which this call being executed.
783  * @num_to_allocate: The requested number of buffers to allocate.
784  *
785  * This routine allocates scsi buffers for device with SLI-4 interface spec,
786  * the scsi buffer contains all the necessary information needed to initiate
787  * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put
788  * them on a list, it post them to the port by using SGL block post.
789  *
790  * Return codes:
791  *   int - number of scsi buffers that were allocated and posted.
792  *   0 = failure, less than num_to_alloc is a partial failure.
793  **/
794 static int
795 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
796 {
797         struct lpfc_hba *phba = vport->phba;
798         struct lpfc_scsi_buf *psb;
799         struct sli4_sge *sgl;
800         IOCB_t *iocb;
801         dma_addr_t pdma_phys_fcp_cmd;
802         dma_addr_t pdma_phys_fcp_rsp;
803         dma_addr_t pdma_phys_bpl;
804         uint16_t iotag, lxri = 0;
805         int bcnt, num_posted, sgl_size;
806         LIST_HEAD(prep_sblist);
807         LIST_HEAD(post_sblist);
808         LIST_HEAD(scsi_sblist);
809
810         sgl_size = phba->cfg_sg_dma_buf_size -
811                 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
812
813         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
814                          "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
815                          num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
816                          (int)sizeof(struct fcp_cmnd),
817                          (int)sizeof(struct fcp_rsp));
818
819         for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
820                 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
821                 if (!psb)
822                         break;
823                 /*
824                  * Get memory from the pci pool to map the virt space to
825                  * pci bus space for an I/O. The DMA buffer includes space
826                  * for the struct fcp_cmnd, struct fcp_rsp and the number
827                  * of bde's necessary to support the sg_tablesize.
828                  */
829                 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
830                                                 GFP_KERNEL, &psb->dma_handle);
831                 if (!psb->data) {
832                         kfree(psb);
833                         break;
834                 }
835
836                 /*
837                  * 4K Page alignment is CRITICAL to BlockGuard, double check
838                  * to be sure.
839                  */
840                 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
841                     (((unsigned long)(psb->data) &
842                     (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
843                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
844                                         "3369 Memory alignment error "
845                                         "addr=%lx\n",
846                                         (unsigned long)psb->data);
847                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
848                                       psb->data, psb->dma_handle);
849                         kfree(psb);
850                         break;
851                 }
852
853
854                 lxri = lpfc_sli4_next_xritag(phba);
855                 if (lxri == NO_XRI) {
856                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
857                                       psb->data, psb->dma_handle);
858                         kfree(psb);
859                         break;
860                 }
861
862                 /* Allocate iotag for psb->cur_iocbq. */
863                 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
864                 if (iotag == 0) {
865                         dma_pool_free(phba->lpfc_sg_dma_buf_pool,
866                                       psb->data, psb->dma_handle);
867                         kfree(psb);
868                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
869                                         "3368 Failed to allocate IOTAG for"
870                                         " XRI:0x%x\n", lxri);
871                         lpfc_sli4_free_xri(phba, lxri);
872                         break;
873                 }
874                 psb->cur_iocbq.sli4_lxritag = lxri;
875                 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
876                 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
877                 psb->fcp_bpl = psb->data;
878                 psb->fcp_cmnd = (psb->data + sgl_size);
879                 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
880                                         sizeof(struct fcp_cmnd));
881
882                 /* Initialize local short-hand pointers. */
883                 sgl = (struct sli4_sge *)psb->fcp_bpl;
884                 pdma_phys_bpl = psb->dma_handle;
885                 pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
886                 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
887
888                 /*
889                  * The first two bdes are the FCP_CMD and FCP_RSP.
890                  * The balance are sg list bdes. Initialize the
891                  * first two and leave the rest for queuecommand.
892                  */
893                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
894                 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
895                 sgl->word2 = le32_to_cpu(sgl->word2);
896                 bf_set(lpfc_sli4_sge_last, sgl, 0);
897                 sgl->word2 = cpu_to_le32(sgl->word2);
898                 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
899                 sgl++;
900
901                 /* Setup the physical region for the FCP RSP */
902                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
903                 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
904                 sgl->word2 = le32_to_cpu(sgl->word2);
905                 bf_set(lpfc_sli4_sge_last, sgl, 1);
906                 sgl->word2 = cpu_to_le32(sgl->word2);
907                 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
908
909                 /*
910                  * Since the IOCB for the FCP I/O is built into this
911                  * lpfc_scsi_buf, initialize it with all known data now.
912                  */
913                 iocb = &psb->cur_iocbq.iocb;
914                 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
915                 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
916                 /* setting the BLP size to 2 * sizeof BDE may not be correct.
917                  * We are setting the bpl to point to out sgl. An sgl's
918                  * entries are 16 bytes, a bpl entries are 12 bytes.
919                  */
920                 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
921                 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
922                 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
923                 iocb->ulpBdeCount = 1;
924                 iocb->ulpLe = 1;
925                 iocb->ulpClass = CLASS3;
926                 psb->cur_iocbq.context1 = psb;
927                 psb->dma_phys_bpl = pdma_phys_bpl;
928
929                 /* add the scsi buffer to a post list */
930                 list_add_tail(&psb->list, &post_sblist);
931                 spin_lock_irq(&phba->scsi_buf_list_get_lock);
932                 phba->sli4_hba.scsi_xri_cnt++;
933                 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
934         }
935         lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
936                         "3021 Allocate %d out of %d requested new SCSI "
937                         "buffers\n", bcnt, num_to_alloc);
938
939         /* post the list of scsi buffer sgls to port if available */
940         if (!list_empty(&post_sblist))
941                 num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
942                                                           &post_sblist, bcnt);
943         else
944                 num_posted = 0;
945
946         return num_posted;
947 }
948
949 /**
950  * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator
951  * @vport: The virtual port for which this call being executed.
952  * @num_to_allocate: The requested number of buffers to allocate.
953  *
954  * This routine wraps the actual SCSI buffer allocator function pointer from
955  * the lpfc_hba struct.
956  *
957  * Return codes:
958  *   int - number of scsi buffers that were allocated.
959  *   0 = failure, less than num_to_alloc is a partial failure.
960  **/
961 static inline int
962 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
963 {
964         return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
965 }
966
967 /**
968  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
969  * @phba: The HBA for which this call is being executed.
970  *
971  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
972  * and returns to caller.
973  *
974  * Return codes:
975  *   NULL - Error
976  *   Pointer to lpfc_scsi_buf - Success
977  **/
978 static struct lpfc_scsi_buf*
979 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
980 {
981         struct  lpfc_scsi_buf * lpfc_cmd = NULL;
982         struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
983         unsigned long iflag = 0;
984
985         spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
986         list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
987                          list);
988         if (!lpfc_cmd) {
989                 spin_lock(&phba->scsi_buf_list_put_lock);
990                 list_splice(&phba->lpfc_scsi_buf_list_put,
991                             &phba->lpfc_scsi_buf_list_get);
992                 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
993                 list_remove_head(scsi_buf_list_get, lpfc_cmd,
994                                  struct lpfc_scsi_buf, list);
995                 spin_unlock(&phba->scsi_buf_list_put_lock);
996         }
997         spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
998
999         if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
1000                 atomic_inc(&ndlp->cmd_pending);
1001                 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
1002         }
1003         return  lpfc_cmd;
1004 }
1005 /**
1006  * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1007  * @phba: The HBA for which this call is being executed.
1008  *
1009  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1010  * and returns to caller.
1011  *
1012  * Return codes:
1013  *   NULL - Error
1014  *   Pointer to lpfc_scsi_buf - Success
1015  **/
1016 static struct lpfc_scsi_buf*
1017 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1018 {
1019         struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
1020         unsigned long iflag = 0;
1021         int found = 0;
1022
1023         spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
1024         list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
1025                                  &phba->lpfc_scsi_buf_list_get, list) {
1026                 if (lpfc_test_rrq_active(phba, ndlp,
1027                                          lpfc_cmd->cur_iocbq.sli4_lxritag))
1028                         continue;
1029                 list_del_init(&lpfc_cmd->list);
1030                 found = 1;
1031                 break;
1032         }
1033         if (!found) {
1034                 spin_lock(&phba->scsi_buf_list_put_lock);
1035                 list_splice(&phba->lpfc_scsi_buf_list_put,
1036                             &phba->lpfc_scsi_buf_list_get);
1037                 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1038                 spin_unlock(&phba->scsi_buf_list_put_lock);
1039                 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
1040                                          &phba->lpfc_scsi_buf_list_get, list) {
1041                         if (lpfc_test_rrq_active(
1042                                 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
1043                                 continue;
1044                         list_del_init(&lpfc_cmd->list);
1045                         found = 1;
1046                         break;
1047                 }
1048         }
1049         spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
1050         if (!found)
1051                 return NULL;
1052
1053         if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
1054                 atomic_inc(&ndlp->cmd_pending);
1055                 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
1056         }
1057         return  lpfc_cmd;
1058 }
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the node the I/O will be issued against.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller. Dispatches through the phba->lpfc_get_scsi_buf
 * method pointer to the SLI-rev specific implementation.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return  phba->lpfc_get_scsi_buf(phba, ndlp);
}
1075
1076 /**
1077  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
1078  * @phba: The Hba for which this call is being executed.
1079  * @psb: The scsi buffer which is being released.
1080  *
1081  * This routine releases @psb scsi buffer by adding it to tail of @phba
1082  * lpfc_scsi_buf_list list.
1083  **/
1084 static void
1085 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1086 {
1087         unsigned long iflag = 0;
1088
1089         psb->seg_cnt = 0;
1090         psb->nonsg_phys = 0;
1091         psb->prot_seg_cnt = 0;
1092
1093         spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1094         psb->pCmd = NULL;
1095         psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1096         list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1097         spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1098 }
1099
1100 /**
1101  * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
1102  * @phba: The Hba for which this call is being executed.
1103  * @psb: The scsi buffer which is being released.
1104  *
1105  * This routine releases @psb scsi buffer by adding it to tail of @phba
1106  * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
1107  * and cannot be reused for at least RA_TOV amount of time if it was
1108  * aborted.
1109  **/
1110 static void
1111 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1112 {
1113         unsigned long iflag = 0;
1114
1115         psb->seg_cnt = 0;
1116         psb->nonsg_phys = 0;
1117         psb->prot_seg_cnt = 0;
1118
1119         if (psb->exch_busy) {
1120                 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1121                                         iflag);
1122                 psb->pCmd = NULL;
1123                 list_add_tail(&psb->list,
1124                         &phba->sli4_hba.lpfc_abts_scsi_buf_list);
1125                 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1126                                         iflag);
1127         } else {
1128                 psb->pCmd = NULL;
1129                 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1130                 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1131                 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1132                 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1133         }
1134 }
1135
1136 /**
1137  * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
1138  * @phba: The Hba for which this call is being executed.
1139  * @psb: The scsi buffer which is being released.
1140  *
1141  * This routine releases @psb scsi buffer by adding it to tail of @phba
1142  * lpfc_scsi_buf_list list.
1143  **/
1144 static void
1145 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1146 {
1147         if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
1148                 atomic_dec(&psb->ndlp->cmd_pending);
1149
1150         psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
1151         phba->lpfc_release_scsi_buf(phba, psb);
1152 }
1153
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and format the bde. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		/* Mapping may not exceed what was sized at probe time. */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			/* Undo the mapping before failing the command. */
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				/* BDE fits in the extended IOCB itself. */
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				/* Otherwise append to the external BPL. */
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			/* BPL lives past cmnd, rsp, and the two fixed BDEs. */
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		/* SLI-2 style: all BDEs live in the BPL; size covers the
		 * two fixed entries (cmnd/rsp) plus the data entries.
		 */
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	/* FCP data length is carried big-endian on the wire. */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
1291
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20
1305
1306 /**
1307  * lpfc_bg_err_inject - Determine if we should inject an error
1308  * @phba: The Hba for which this call is being executed.
1309  * @sc: The SCSI command to examine
1310  * @reftag: (out) BlockGuard reference tag for transmitted data
1311  * @apptag: (out) BlockGuard application tag for transmitted data
1312  * @new_guard (in) Value to replace CRC with if needed
1313  *
1314  * Returns BG_ERR_* bit mask or 0 if request ignored
1315  **/
1316 static int
1317 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1318                 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1319 {
1320         struct scatterlist *sgpe; /* s/g prot entry */
1321         struct lpfc_scsi_buf *lpfc_cmd = NULL;
1322         struct scsi_dif_tuple *src = NULL;
1323         struct lpfc_nodelist *ndlp;
1324         struct lpfc_rport_data *rdata;
1325         uint32_t op = scsi_get_prot_op(sc);
1326         uint32_t blksize;
1327         uint32_t numblks;
1328         sector_t lba;
1329         int rc = 0;
1330         int blockoff = 0;
1331
1332         if (op == SCSI_PROT_NORMAL)
1333                 return 0;
1334
1335         sgpe = scsi_prot_sglist(sc);
1336         lba = scsi_get_lba(sc);
1337
1338         /* First check if we need to match the LBA */
1339         if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1340                 blksize = lpfc_cmd_blksize(sc);
1341                 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1342
1343                 /* Make sure we have the right LBA if one is specified */
1344                 if ((phba->lpfc_injerr_lba < lba) ||
1345                         (phba->lpfc_injerr_lba >= (lba + numblks)))
1346                         return 0;
1347                 if (sgpe) {
1348                         blockoff = phba->lpfc_injerr_lba - lba;
1349                         numblks = sg_dma_len(sgpe) /
1350                                 sizeof(struct scsi_dif_tuple);
1351                         if (numblks < blockoff)
1352                                 blockoff = numblks;
1353                 }
1354         }
1355
1356         /* Next check if we need to match the remote NPortID or WWPN */
1357         rdata = lpfc_rport_data_from_scsi_device(sc->device);
1358         if (rdata && rdata->pnode) {
1359                 ndlp = rdata->pnode;
1360
1361                 /* Make sure we have the right NPortID if one is specified */
1362                 if (phba->lpfc_injerr_nportid  &&
1363                         (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1364                         return 0;
1365
1366                 /*
1367                  * Make sure we have the right WWPN if one is specified.
1368                  * wwn[0] should be a non-zero NAA in a good WWPN.
1369                  */
1370                 if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1371                         (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1372                                 sizeof(struct lpfc_name)) != 0))
1373                         return 0;
1374         }
1375
1376         /* Setup a ptr to the protection data if the SCSI host provides it */
1377         if (sgpe) {
1378                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1379                 src += blockoff;
1380                 lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1381         }
1382
1383         /* Should we change the Reference Tag */
1384         if (reftag) {
1385                 if (phba->lpfc_injerr_wref_cnt) {
1386                         switch (op) {
1387                         case SCSI_PROT_WRITE_PASS:
1388                                 if (src) {
1389                                         /*
1390                                          * For WRITE_PASS, force the error
1391                                          * to be sent on the wire. It should
1392                                          * be detected by the Target.
1393                                          * If blockoff != 0 error will be
1394                                          * inserted in middle of the IO.
1395                                          */
1396
1397                                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1398                                         "9076 BLKGRD: Injecting reftag error: "
1399                                         "write lba x%lx + x%x oldrefTag x%x\n",
1400                                         (unsigned long)lba, blockoff,
1401                                         be32_to_cpu(src->ref_tag));
1402
1403                                         /*
1404                                          * Save the old ref_tag so we can
1405                                          * restore it on completion.
1406                                          */
1407                                         if (lpfc_cmd) {
1408                                                 lpfc_cmd->prot_data_type =
1409                                                         LPFC_INJERR_REFTAG;
1410                                                 lpfc_cmd->prot_data_segment =
1411                                                         src;
1412                                                 lpfc_cmd->prot_data =
1413                                                         src->ref_tag;
1414                                         }
1415                                         src->ref_tag = cpu_to_be32(0xDEADBEEF);
1416                                         phba->lpfc_injerr_wref_cnt--;
1417                                         if (phba->lpfc_injerr_wref_cnt == 0) {
1418                                                 phba->lpfc_injerr_nportid = 0;
1419                                                 phba->lpfc_injerr_lba =
1420                                                         LPFC_INJERR_LBA_OFF;
1421                                                 memset(&phba->lpfc_injerr_wwpn,
1422                                                   0, sizeof(struct lpfc_name));
1423                                         }
1424                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1425
1426                                         break;
1427                                 }
1428                                 /* Drop thru */
1429                         case SCSI_PROT_WRITE_INSERT:
1430                                 /*
1431                                  * For WRITE_INSERT, force the error
1432                                  * to be sent on the wire. It should be
1433                                  * detected by the Target.
1434                                  */
1435                                 /* DEADBEEF will be the reftag on the wire */
1436                                 *reftag = 0xDEADBEEF;
1437                                 phba->lpfc_injerr_wref_cnt--;
1438                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1439                                         phba->lpfc_injerr_nportid = 0;
1440                                         phba->lpfc_injerr_lba =
1441                                         LPFC_INJERR_LBA_OFF;
1442                                         memset(&phba->lpfc_injerr_wwpn,
1443                                                 0, sizeof(struct lpfc_name));
1444                                 }
1445                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1446
1447                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1448                                         "9078 BLKGRD: Injecting reftag error: "
1449                                         "write lba x%lx\n", (unsigned long)lba);
1450                                 break;
1451                         case SCSI_PROT_WRITE_STRIP:
1452                                 /*
1453                                  * For WRITE_STRIP and WRITE_PASS,
1454                                  * force the error on data
1455                                  * being copied from SLI-Host to SLI-Port.
1456                                  */
1457                                 *reftag = 0xDEADBEEF;
1458                                 phba->lpfc_injerr_wref_cnt--;
1459                                 if (phba->lpfc_injerr_wref_cnt == 0) {
1460                                         phba->lpfc_injerr_nportid = 0;
1461                                         phba->lpfc_injerr_lba =
1462                                                 LPFC_INJERR_LBA_OFF;
1463                                         memset(&phba->lpfc_injerr_wwpn,
1464                                                 0, sizeof(struct lpfc_name));
1465                                 }
1466                                 rc = BG_ERR_INIT;
1467
1468                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1469                                         "9077 BLKGRD: Injecting reftag error: "
1470                                         "write lba x%lx\n", (unsigned long)lba);
1471                                 break;
1472                         }
1473                 }
1474                 if (phba->lpfc_injerr_rref_cnt) {
1475                         switch (op) {
1476                         case SCSI_PROT_READ_INSERT:
1477                         case SCSI_PROT_READ_STRIP:
1478                         case SCSI_PROT_READ_PASS:
1479                                 /*
1480                                  * For READ_STRIP and READ_PASS, force the
1481                                  * error on data being read off the wire. It
1482                                  * should force an IO error to the driver.
1483                                  */
1484                                 *reftag = 0xDEADBEEF;
1485                                 phba->lpfc_injerr_rref_cnt--;
1486                                 if (phba->lpfc_injerr_rref_cnt == 0) {
1487                                         phba->lpfc_injerr_nportid = 0;
1488                                         phba->lpfc_injerr_lba =
1489                                                 LPFC_INJERR_LBA_OFF;
1490                                         memset(&phba->lpfc_injerr_wwpn,
1491                                                 0, sizeof(struct lpfc_name));
1492                                 }
1493                                 rc = BG_ERR_INIT;
1494
1495                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1496                                         "9079 BLKGRD: Injecting reftag error: "
1497                                         "read lba x%lx\n", (unsigned long)lba);
1498                                 break;
1499                         }
1500                 }
1501         }
1502
1503         /* Should we change the Application Tag */
1504         if (apptag) {
1505                 if (phba->lpfc_injerr_wapp_cnt) {
1506                         switch (op) {
1507                         case SCSI_PROT_WRITE_PASS:
1508                                 if (src) {
1509                                         /*
1510                                          * For WRITE_PASS, force the error
1511                                          * to be sent on the wire. It should
1512                                          * be detected by the Target.
1513                                          * If blockoff != 0 error will be
1514                                          * inserted in middle of the IO.
1515                                          */
1516
1517                                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1518                                         "9080 BLKGRD: Injecting apptag error: "
1519                                         "write lba x%lx + x%x oldappTag x%x\n",
1520                                         (unsigned long)lba, blockoff,
1521                                         be16_to_cpu(src->app_tag));
1522
1523                                         /*
1524                                          * Save the old app_tag so we can
1525                                          * restore it on completion.
1526                                          */
1527                                         if (lpfc_cmd) {
1528                                                 lpfc_cmd->prot_data_type =
1529                                                         LPFC_INJERR_APPTAG;
1530                                                 lpfc_cmd->prot_data_segment =
1531                                                         src;
1532                                                 lpfc_cmd->prot_data =
1533                                                         src->app_tag;
1534                                         }
1535                                         src->app_tag = cpu_to_be16(0xDEAD);
1536                                         phba->lpfc_injerr_wapp_cnt--;
1537                                         if (phba->lpfc_injerr_wapp_cnt == 0) {
1538                                                 phba->lpfc_injerr_nportid = 0;
1539                                                 phba->lpfc_injerr_lba =
1540                                                         LPFC_INJERR_LBA_OFF;
1541                                                 memset(&phba->lpfc_injerr_wwpn,
1542                                                   0, sizeof(struct lpfc_name));
1543                                         }
1544                                         rc = BG_ERR_TGT | BG_ERR_CHECK;
1545                                         break;
1546                                 }
1547                                 /* Drop thru */
1548                         case SCSI_PROT_WRITE_INSERT:
1549                                 /*
1550                                  * For WRITE_INSERT, force the
1551                                  * error to be sent on the wire. It should be
1552                                  * detected by the Target.
1553                                  */
1554                                 /* DEAD will be the apptag on the wire */
1555                                 *apptag = 0xDEAD;
1556                                 phba->lpfc_injerr_wapp_cnt--;
1557                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1558                                         phba->lpfc_injerr_nportid = 0;
1559                                         phba->lpfc_injerr_lba =
1560                                                 LPFC_INJERR_LBA_OFF;
1561                                         memset(&phba->lpfc_injerr_wwpn,
1562                                                 0, sizeof(struct lpfc_name));
1563                                 }
1564                                 rc = BG_ERR_TGT | BG_ERR_CHECK;
1565
1566                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1567                                         "0813 BLKGRD: Injecting apptag error: "
1568                                         "write lba x%lx\n", (unsigned long)lba);
1569                                 break;
1570                         case SCSI_PROT_WRITE_STRIP:
1571                                 /*
1572                                  * For WRITE_STRIP and WRITE_PASS,
1573                                  * force the error on data
1574                                  * being copied from SLI-Host to SLI-Port.
1575                                  */
1576                                 *apptag = 0xDEAD;
1577                                 phba->lpfc_injerr_wapp_cnt--;
1578                                 if (phba->lpfc_injerr_wapp_cnt == 0) {
1579                                         phba->lpfc_injerr_nportid = 0;
1580                                         phba->lpfc_injerr_lba =
1581                                                 LPFC_INJERR_LBA_OFF;
1582                                         memset(&phba->lpfc_injerr_wwpn,
1583                                                 0, sizeof(struct lpfc_name));
1584                                 }
1585                                 rc = BG_ERR_INIT;
1586
1587                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1588                                         "0812 BLKGRD: Injecting apptag error: "
1589                                         "write lba x%lx\n", (unsigned long)lba);
1590                                 break;
1591                         }
1592                 }
1593                 if (phba->lpfc_injerr_rapp_cnt) {
1594                         switch (op) {
1595                         case SCSI_PROT_READ_INSERT:
1596                         case SCSI_PROT_READ_STRIP:
1597                         case SCSI_PROT_READ_PASS:
1598                                 /*
1599                                  * For READ_STRIP and READ_PASS, force the
1600                                  * error on data being read off the wire. It
1601                                  * should force an IO error to the driver.
1602                                  */
1603                                 *apptag = 0xDEAD;
1604                                 phba->lpfc_injerr_rapp_cnt--;
1605                                 if (phba->lpfc_injerr_rapp_cnt == 0) {
1606                                         phba->lpfc_injerr_nportid = 0;
1607                                         phba->lpfc_injerr_lba =
1608                                                 LPFC_INJERR_LBA_OFF;
1609                                         memset(&phba->lpfc_injerr_wwpn,
1610                                                 0, sizeof(struct lpfc_name));
1611                                 }
1612                                 rc = BG_ERR_INIT;
1613
1614                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1615                                         "0814 BLKGRD: Injecting apptag error: "
1616                                         "read lba x%lx\n", (unsigned long)lba);
1617                                 break;
1618                         }
1619                 }
1620         }
1621
1622
1623         /* Should we change the Guard Tag */
1624         if (new_guard) {
1625                 if (phba->lpfc_injerr_wgrd_cnt) {
1626                         switch (op) {
1627                         case SCSI_PROT_WRITE_PASS:
1628                                 rc = BG_ERR_CHECK;
1629                                 /* Drop thru */
1630
1631                         case SCSI_PROT_WRITE_INSERT:
1632                                 /*
1633                                  * For WRITE_INSERT, force the
1634                                  * error to be sent on the wire. It should be
1635                                  * detected by the Target.
1636                                  */
1637                                 phba->lpfc_injerr_wgrd_cnt--;
1638                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1639                                         phba->lpfc_injerr_nportid = 0;
1640                                         phba->lpfc_injerr_lba =
1641                                                 LPFC_INJERR_LBA_OFF;
1642                                         memset(&phba->lpfc_injerr_wwpn,
1643                                                 0, sizeof(struct lpfc_name));
1644                                 }
1645
1646                                 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1647                                 /* Signals the caller to swap CRC->CSUM */
1648
1649                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1650                                         "0817 BLKGRD: Injecting guard error: "
1651                                         "write lba x%lx\n", (unsigned long)lba);
1652                                 break;
1653                         case SCSI_PROT_WRITE_STRIP:
1654                                 /*
1655                                  * For WRITE_STRIP and WRITE_PASS,
1656                                  * force the error on data
1657                                  * being copied from SLI-Host to SLI-Port.
1658                                  */
1659                                 phba->lpfc_injerr_wgrd_cnt--;
1660                                 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1661                                         phba->lpfc_injerr_nportid = 0;
1662                                         phba->lpfc_injerr_lba =
1663                                                 LPFC_INJERR_LBA_OFF;
1664                                         memset(&phba->lpfc_injerr_wwpn,
1665                                                 0, sizeof(struct lpfc_name));
1666                                 }
1667
1668                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1669                                 /* Signals the caller to swap CRC->CSUM */
1670
1671                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1672                                         "0816 BLKGRD: Injecting guard error: "
1673                                         "write lba x%lx\n", (unsigned long)lba);
1674                                 break;
1675                         }
1676                 }
1677                 if (phba->lpfc_injerr_rgrd_cnt) {
1678                         switch (op) {
1679                         case SCSI_PROT_READ_INSERT:
1680                         case SCSI_PROT_READ_STRIP:
1681                         case SCSI_PROT_READ_PASS:
1682                                 /*
1683                                  * For READ_STRIP and READ_PASS, force the
1684                                  * error on data being read off the wire. It
1685                                  * should force an IO error to the driver.
1686                                  */
1687                                 phba->lpfc_injerr_rgrd_cnt--;
1688                                 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1689                                         phba->lpfc_injerr_nportid = 0;
1690                                         phba->lpfc_injerr_lba =
1691                                                 LPFC_INJERR_LBA_OFF;
1692                                         memset(&phba->lpfc_injerr_wwpn,
1693                                                 0, sizeof(struct lpfc_name));
1694                                 }
1695
1696                                 rc = BG_ERR_INIT | BG_ERR_SWAP;
1697                                 /* Signals the caller to swap CRC->CSUM */
1698
1699                                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1700                                         "0818 BLKGRD: Injecting guard error: "
1701                                         "read lba x%lx\n", (unsigned long)lba);
1702                         }
1703                 }
1704         }
1705
1706         return rc;
1707 }
1708 #endif
1709
1710 /**
1711  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1712  * the specified SCSI command.
1713  * @phba: The Hba for which this call is being executed.
1714  * @sc: The SCSI command to examine
1715  * @txopt: (out) BlockGuard operation for transmitted data
1716  * @rxopt: (out) BlockGuard operation for received data
1717  *
1718  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1719  *
1720  **/
1721 static int
1722 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1723                 uint8_t *txop, uint8_t *rxop)
1724 {
1725         uint8_t ret = 0;
1726
1727         if (lpfc_cmd_guard_csum(sc)) {
1728                 switch (scsi_get_prot_op(sc)) {
1729                 case SCSI_PROT_READ_INSERT:
1730                 case SCSI_PROT_WRITE_STRIP:
1731                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1732                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1733                         break;
1734
1735                 case SCSI_PROT_READ_STRIP:
1736                 case SCSI_PROT_WRITE_INSERT:
1737                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1738                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1739                         break;
1740
1741                 case SCSI_PROT_READ_PASS:
1742                 case SCSI_PROT_WRITE_PASS:
1743                         *rxop = BG_OP_IN_CRC_OUT_CSUM;
1744                         *txop = BG_OP_IN_CSUM_OUT_CRC;
1745                         break;
1746
1747                 case SCSI_PROT_NORMAL:
1748                 default:
1749                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1750                                 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1751                                         scsi_get_prot_op(sc));
1752                         ret = 1;
1753                         break;
1754
1755                 }
1756         } else {
1757                 switch (scsi_get_prot_op(sc)) {
1758                 case SCSI_PROT_READ_STRIP:
1759                 case SCSI_PROT_WRITE_INSERT:
1760                         *rxop = BG_OP_IN_CRC_OUT_NODIF;
1761                         *txop = BG_OP_IN_NODIF_OUT_CRC;
1762                         break;
1763
1764                 case SCSI_PROT_READ_PASS:
1765                 case SCSI_PROT_WRITE_PASS:
1766                         *rxop = BG_OP_IN_CRC_OUT_CRC;
1767                         *txop = BG_OP_IN_CRC_OUT_CRC;
1768                         break;
1769
1770                 case SCSI_PROT_READ_INSERT:
1771                 case SCSI_PROT_WRITE_STRIP:
1772                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1773                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1774                         break;
1775
1776                 case SCSI_PROT_NORMAL:
1777                 default:
1778                         lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1779                                 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1780                                         scsi_get_prot_op(sc));
1781                         ret = 1;
1782                         break;
1783                 }
1784         }
1785
1786         return ret;
1787 }
1788
1789 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1790 /**
1791  * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
1792  * the specified SCSI command in order to force a guard tag error.
1793  * @phba: The Hba for which this call is being executed.
1794  * @sc: The SCSI command to examine
1795  * @txopt: (out) BlockGuard operation for transmitted data
1796  * @rxopt: (out) BlockGuard operation for received data
1797  *
1798  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1799  *
1800  **/
1801 static int
1802 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1803                 uint8_t *txop, uint8_t *rxop)
1804 {
1805         uint8_t ret = 0;
1806
1807         if (lpfc_cmd_guard_csum(sc)) {
1808                 switch (scsi_get_prot_op(sc)) {
1809                 case SCSI_PROT_READ_INSERT:
1810                 case SCSI_PROT_WRITE_STRIP:
1811                         *rxop = BG_OP_IN_NODIF_OUT_CRC;
1812                         *txop = BG_OP_IN_CRC_OUT_NODIF;
1813                         break;
1814
1815                 case SCSI_PROT_READ_STRIP:
1816                 case SCSI_PROT_WRITE_INSERT:
1817                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1818                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1819                         break;
1820
1821                 case SCSI_PROT_READ_PASS:
1822                 case SCSI_PROT_WRITE_PASS:
1823                         *rxop = BG_OP_IN_CSUM_OUT_CRC;
1824                         *txop = BG_OP_IN_CRC_OUT_CSUM;
1825                         break;
1826
1827                 case SCSI_PROT_NORMAL:
1828                 default:
1829                         break;
1830
1831                 }
1832         } else {
1833                 switch (scsi_get_prot_op(sc)) {
1834                 case SCSI_PROT_READ_STRIP:
1835                 case SCSI_PROT_WRITE_INSERT:
1836                         *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1837                         *txop = BG_OP_IN_NODIF_OUT_CSUM;
1838                         break;
1839
1840                 case SCSI_PROT_READ_PASS:
1841                 case SCSI_PROT_WRITE_PASS:
1842                         *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1843                         *txop = BG_OP_IN_CSUM_OUT_CSUM;
1844                         break;
1845
1846                 case SCSI_PROT_READ_INSERT:
1847                 case SCSI_PROT_WRITE_STRIP:
1848                         *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1849                         *txop = BG_OP_IN_CSUM_OUT_NODIF;
1850                         break;
1851
1852                 case SCSI_PROT_NORMAL:
1853                 default:
1854                         break;
1855                 }
1856         }
1857
1858         return ret;
1859 }
1860 #endif
1861
/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;	/* 1 = HBA checks guard/ref tags on receive */
	uint32_t reftag;
	uint8_t txop, rxop;

	/* Map the SCSI protection op to BlockGuard tx/rx opcodes; a
	 * failure here means there is no valid op/guard combination,
	 * so report zero BDEs added.
	 */
	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* Extract some info from the scsi command for PDE; the initial
	 * reference tag is the LBA truncated to 32 bits.
	 */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs-driven error injection: may corrupt the reftag, swap
	 * the guard opcodes (BG_ERR_SWAP), and/or disable HBA checking
	 * (BG_ERR_CHECK) so the injected error reaches the other end.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info (tx/rx BlockGuard opcodes) */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		/* Enable guard/ref checking only if the command asks for it
		 * (and error injection has not cleared 'checking' above).
		 */
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);	/* auto-increment ref tag per block */
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		/* NOTE(review): le32_to_cpu here looks inverted but is
		 * byte-identical to cpu_to_le32; kept as-is.
		 */
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
1993
1994 /**
1995  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1996  * @phba: The Hba for which this call is being executed.
1997  * @sc: pointer to scsi command we're working on
1998  * @bpl: pointer to buffer list for protection groups
1999  * @datacnt: number of segments of data that have been dma mapped
2000  * @protcnt: number of segment of protection data that have been dma mapped
2001  *
2002  * This function sets up BPL buffer list for protection groups of
2003  * type LPFC_PG_TYPE_DIF
2004  *
2005  * This is usually used when DIFs are in their own buffers,
2006  * separate from the data. The HBA can then by instructed
2007  * to place the DIFs in the outgoing stream.  For read operations,
2008  * The HBA could extract the DIFs and place it in DIF buffers.
2009  *
2010  * The buffer list for this type consists of one or more of the
2011  * protection groups described below:
2012  *                                    +-------------------------+
2013  *   start of first prot group  -->   |          PDE_5          |
2014  *                                    +-------------------------+
2015  *                                    |          PDE_6          |
2016  *                                    +-------------------------+
2017  *                                    |      PDE_7 (Prot BDE)   |
2018  *                                    +-------------------------+
2019  *                                    |        Data BDE         |
2020  *                                    +-------------------------+
2021  *                                    |more Data BDE's ... (opt)|
2022  *                                    +-------------------------+
2023  *   start of new  prot group  -->    |          PDE_5          |
2024  *                                    +-------------------------+
2025  *                                    |          ...            |
2026  *                                    +-------------------------+
2027  *
2028  * Note: It is assumed that both data and protection s/g buffers have been
2029  *       mapped for DMA
2030  *
2031  * Returns the number of BDEs added to the BPL.
2032  **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	/* NOTE(review): @datacnt is not referenced in this routine; data
	 * segments are walked with sg_next() until each protection group's
	 * byte count is satisfied.
	 */
	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Fault injection (debugfs) may swap the BG opcodes or turn off
	 * guard/ref checking for this command.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	/* One pass per protection group: PDE5, PDE6, PDE7 and then the
	 * data BDEs covering protgrp_bytes of data.
	 */
	do {
		/* Check to see if we ran out of space.
		 * Returning a count beyond the configured limit lets the
		 * caller detect the overflow (it compares against
		 * cfg_total_seg_cnt).
		 */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		/* Enable guard/ref checking only if requested for this cmd
		 * (and not disabled by error injection above).
		 */
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6.
		 * Note: must happen after all bf_set() calls on these words.
		 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length
		 * (a DIF tuple is 8 bytes)
		 */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		/* Each 8-byte DIF tuple protects one logical block of data */
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split.
		 * protgroup_offset carries the residue into the next loop
		 * iteration, which re-reads the same sgpe entry.
		 */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			/* split_offset is non-zero when the previous group
			 * consumed only part of this data segment.
			 */
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			/* keep this sgde: the rest of it belongs to the
			 * next protection group
			 */
			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag and continue with the
			 * remainder of the same protection segment
			 */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
2248
2249 /**
2250  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
2251  * @phba: The Hba for which this call is being executed.
2252  * @sc: pointer to scsi command we're working on
2253  * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
2255  *
2256  * This function sets up SGL buffer list for protection groups of
2257  * type LPFC_PG_TYPE_NO_DIF
2258  *
2259  * This is usually used when the HBA is instructed to generate
2260  * DIFs and insert them into data stream (or strip DIF from
2261  * incoming data stream)
2262  *
2263  * The buffer list consists of just one protection group described
2264  * below:
2265  *                                +-------------------------+
2266  *   start of prot group  -->     |         DI_SEED         |
2267  *                                +-------------------------+
2268  *                                |         Data SGE        |
2269  *                                +-------------------------+
2270  *                                |more Data SGE's ... (opt)|
2271  *                                +-------------------------+
2272  *
2273  *
2274  * Note: Data s/g buffers have been dma mapped
2275  *
2276  * Returns the number of SGEs added to the SGL.
2277  **/
2278 static int
2279 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2280                 struct sli4_sge *sgl, int datasegcnt)
2281 {
2282         struct scatterlist *sgde = NULL; /* s/g data entry */
2283         struct sli4_sge_diseed *diseed = NULL;
2284         dma_addr_t physaddr;
2285         int i = 0, num_sge = 0, status;
2286         uint32_t reftag;
2287         uint8_t txop, rxop;
2288 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2289         uint32_t rc;
2290 #endif
2291         uint32_t checking = 1;
2292         uint32_t dma_len;
2293         uint32_t dma_offset = 0;
2294
2295         status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2296         if (status)
2297                 goto out;
2298
2299         /* extract some info from the scsi command for pde*/
2300         reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2301
2302 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2303         rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2304         if (rc) {
2305                 if (rc & BG_ERR_SWAP)
2306                         lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2307                 if (rc & BG_ERR_CHECK)
2308                         checking = 0;
2309         }
2310 #endif
2311
2312         /* setup DISEED with what we have */
2313         diseed = (struct sli4_sge_diseed *) sgl;
2314         memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2315         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2316
2317         /* Endianness conversion if necessary */
2318         diseed->ref_tag = cpu_to_le32(reftag);
2319         diseed->ref_tag_tran = diseed->ref_tag;
2320
2321         /*
2322          * We only need to check the data on READs, for WRITEs
2323          * protection data is automatically generated, not checked.
2324          */
2325         if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2326                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2327                         bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2328                 else
2329                         bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2330
2331                 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2332                         bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2333                 else
2334                         bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2335         }
2336
2337         /* setup DISEED with the rest of the info */
2338         bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2339         bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2340
2341         bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2342         bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2343
2344         /* Endianness conversion if necessary for DISEED */
2345         diseed->word2 = cpu_to_le32(diseed->word2);
2346         diseed->word3 = cpu_to_le32(diseed->word3);
2347
2348         /* advance bpl and increment sge count */
2349         num_sge++;
2350         sgl++;
2351
2352         /* assumption: caller has already run dma_map_sg on command data */
2353         scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2354                 physaddr = sg_dma_address(sgde);
2355                 dma_len = sg_dma_len(sgde);
2356                 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2357                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2358                 if ((i + 1) == datasegcnt)
2359                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2360                 else
2361                         bf_set(lpfc_sli4_sge_last, sgl, 0);
2362                 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2363                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2364
2365                 sgl->sge_len = cpu_to_le32(dma_len);
2366                 dma_offset += dma_len;
2367
2368                 sgl++;
2369                 num_sge++;
2370         }
2371
2372 out:
2373         return num_sge;
2374 }
2375
2376 /**
2377  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2378  * @phba: The Hba for which this call is being executed.
2379  * @sc: pointer to scsi command we're working on
2380  * @sgl: pointer to buffer list for protection groups
2381  * @datacnt: number of segments of data that have been dma mapped
2382  * @protcnt: number of segment of protection data that have been dma mapped
2383  *
2384  * This function sets up SGL buffer list for protection groups of
2385  * type LPFC_PG_TYPE_DIF
2386  *
2387  * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
2391  *
2392  * The buffer list for this type consists of one or more of the
2393  * protection groups described below:
2394  *                                    +-------------------------+
2395  *   start of first prot group  -->   |         DISEED          |
2396  *                                    +-------------------------+
2397  *                                    |      DIF (Prot SGE)     |
2398  *                                    +-------------------------+
2399  *                                    |        Data SGE         |
2400  *                                    +-------------------------+
2401  *                                    |more Data SGE's ... (opt)|
2402  *                                    +-------------------------+
2403  *   start of new  prot group  -->    |         DISEED          |
2404  *                                    +-------------------------+
2405  *                                    |          ...            |
2406  *                                    +-------------------------+
2407  *
2408  * Note: It is assumed that both data and protection s/g buffers have been
2409  *       mapped for DMA
2410  *
2411  * Returns the number of SGEs added to the SGL.
2412  **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	/* NOTE(review): @datacnt is not referenced in this routine; data
	 * segments are walked with sg_next() until each protection group's
	 * byte count is satisfied.
	 */
	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Fault injection (debugfs) may swap the BG opcodes or turn off
	 * guard/ref checking for this command.
	 */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	/* One pass per protection group: DISEED, DIF SGE, then the data
	 * SGEs covering protgrp_bytes of data.
	 */
	do {
		/* Check to see if we ran out of space.
		 * Returning a count beyond the configured limit lets the
		 * caller detect the overflow (it compares against
		 * cfg_total_seg_cnt).
		 */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED.
		 * Note: must happen after all bf_set() calls on these words.
		 */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length
		 * (a DIF tuple is 8 bytes)
		 */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		/* Each 8-byte DIF tuple protects one logical block of data */
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split.
		 * protgroup_offset carries the residue into the next loop
		 * iteration, which re-reads the same sgpe entry.
		 */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			/* split_offset is non-zero when the previous group
			 * consumed only part of this data segment.
			 */
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* dma_offset accumulates across all prot groups */
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			/* keep this sgde: the rest of it belongs to the
			 * next protection group
			 */
			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag and continue with the
			 * remainder of the same protection segment
			 */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			/* mark the last SGE in the list */
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
2634
2635 /**
 * lpfc_prot_group_type - Get protection group type of SCSI command
2637  * @phba: The Hba for which this call is being executed.
2638  * @sc: pointer to scsi command we're working on
2639  *
2640  * Given a SCSI command that supports DIF, determine composition of protection
2641  * groups involved in setting up buffer lists
2642  *
2643  * Returns: Protection group type (with or without DIF)
2644  *
2645  **/
2646 static int
2647 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2648 {
2649         int ret = LPFC_PG_TYPE_INVALID;
2650         unsigned char op = scsi_get_prot_op(sc);
2651
2652         switch (op) {
2653         case SCSI_PROT_READ_STRIP:
2654         case SCSI_PROT_WRITE_INSERT:
2655                 ret = LPFC_PG_TYPE_NO_DIF;
2656                 break;
2657         case SCSI_PROT_READ_INSERT:
2658         case SCSI_PROT_WRITE_STRIP:
2659         case SCSI_PROT_READ_PASS:
2660         case SCSI_PROT_WRITE_PASS:
2661                 ret = LPFC_PG_TYPE_DIF_BUF;
2662                 break;
2663         default:
2664                 if (phba)
2665                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2666                                         "9021 Unsupported protection op:%d\n",
2667                                         op);
2668                 break;
2669         }
2670         return ret;
2671 }
2672
2673 /**
2674  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2675  * @phba: The Hba for which this call is being executed.
2676  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2677  *
2678  * Adjust the data length to account for how much data
2679  * is actually on the wire.
2680  *
2681  * returns the adjusted data length
2682  **/
2683 static int
2684 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2685                        struct lpfc_scsi_buf *lpfc_cmd)
2686 {
2687         struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2688         int fcpdl;
2689
2690         fcpdl = scsi_bufflen(sc);
2691
2692         /* Check if there is protection data on the wire */
2693         if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2694                 /* Read check for protection data */
2695                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2696                         return fcpdl;
2697
2698         } else {
2699                 /* Write check for protection data */
2700                 if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2701                         return fcpdl;
2702         }
2703
2704         /*
2705          * If we are in DIF Type 1 mode every data block has a 8 byte
2706          * DIF (trailer) attached to it. Must ajust FCP data length
2707          * to account for the protection data.
2708          */
2709         fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2710
2711         return fcpdl;
2712 }
2713
2714 /**
2715  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2716  * @phba: The Hba for which this call is being executed.
2717  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2718  *
2719  * This is the protection/DIF aware version of
2720  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2721  * two functions eventually, but for now, it's here
2722  **/
2723 static int
2724 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2725                 struct lpfc_scsi_buf *lpfc_cmd)
2726 {
2727         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2728         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2729         struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2730         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2731         uint32_t num_bde = 0;
2732         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2733         int prot_group_type = 0;
2734         int fcpdl;
2735         struct lpfc_vport *vport = phba->pport;
2736
2737         /*
2738          * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2739          *  fcp_rsp regions to the first data bde entry
2740          */
2741         bpl += 2;
2742         if (scsi_sg_count(scsi_cmnd)) {
2743                 /*
2744                  * The driver stores the segment count returned from pci_map_sg
2745                  * because this a count of dma-mappings used to map the use_sg
2746                  * pages.  They are not guaranteed to be the same for those
2747                  * architectures that implement an IOMMU.
2748                  */
2749                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2750                                         scsi_sglist(scsi_cmnd),
2751                                         scsi_sg_count(scsi_cmnd), datadir);
2752                 if (unlikely(!datasegcnt))
2753                         return 1;
2754
2755                 lpfc_cmd->seg_cnt = datasegcnt;
2756
2757                 /* First check if data segment count from SCSI Layer is good */
2758                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2759                         goto err;
2760
2761                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2762
2763                 switch (prot_group_type) {
2764                 case LPFC_PG_TYPE_NO_DIF:
2765
2766                         /* Here we need to add a PDE5 and PDE6 to the count */
2767                         if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2768                                 goto err;
2769
2770                         num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2771                                         datasegcnt);
2772                         /* we should have 2 or more entries in buffer list */
2773                         if (num_bde < 2)
2774                                 goto err;
2775                         break;
2776
2777                 case LPFC_PG_TYPE_DIF_BUF:
2778                         /*
2779                          * This type indicates that protection buffers are
2780                          * passed to the driver, so that needs to be prepared
2781                          * for DMA
2782                          */
2783                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
2784                                         scsi_prot_sglist(scsi_cmnd),
2785                                         scsi_prot_sg_count(scsi_cmnd), datadir);
2786                         if (unlikely(!protsegcnt)) {
2787                                 scsi_dma_unmap(scsi_cmnd);
2788                                 return 1;
2789                         }
2790
2791                         lpfc_cmd->prot_seg_cnt = protsegcnt;
2792
2793                         /*
2794                          * There is a minimun of 4 BPLs used for every
2795                          * protection data segment.
2796                          */
2797                         if ((lpfc_cmd->prot_seg_cnt * 4) >
2798                             (phba->cfg_total_seg_cnt - 2))
2799                                 goto err;
2800
2801                         num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2802                                         datasegcnt, protsegcnt);
2803                         /* we should have 3 or more entries in buffer list */
2804                         if ((num_bde < 3) ||
2805                             (num_bde > phba->cfg_total_seg_cnt))
2806                                 goto err;
2807                         break;
2808
2809                 case LPFC_PG_TYPE_INVALID:
2810                 default:
2811                         scsi_dma_unmap(scsi_cmnd);
2812                         lpfc_cmd->seg_cnt = 0;
2813
2814                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2815                                         "9022 Unexpected protection group %i\n",
2816                                         prot_group_type);
2817                         return 1;
2818                 }
2819         }
2820
2821         /*
2822          * Finish initializing those IOCB fields that are dependent on the
2823          * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2824          * reinitialized since all iocb memory resources are used many times
2825          * for transmit, receive, and continuation bpl's.
2826          */
2827         iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2828         iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2829         iocb_cmd->ulpBdeCount = 1;
2830         iocb_cmd->ulpLe = 1;
2831
2832         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2833         fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2834
2835         /*
2836          * Due to difference in data length between DIF/non-DIF paths,
2837          * we need to set word 4 of IOCB here
2838          */
2839         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2840
2841         /*
2842          * For First burst, we may need to adjust the initial transfer
2843          * length for DIF
2844          */
2845         if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2846             (fcpdl < vport->cfg_first_burst_size))
2847                 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2848
2849         return 0;
2850 err:
2851         if (lpfc_cmd->seg_cnt)
2852                 scsi_dma_unmap(scsi_cmnd);
2853         if (lpfc_cmd->prot_seg_cnt)
2854                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2855                              scsi_prot_sg_count(scsi_cmnd),
2856                              scsi_cmnd->sc_data_direction);
2857
2858         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2859                         "9023 Cannot setup S/G List for HBA"
2860                         "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2861                         lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2862                         phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2863                         prot_group_type, num_bde);
2864
2865         lpfc_cmd->seg_cnt = 0;
2866         lpfc_cmd->prot_seg_cnt = 0;
2867         return 1;
2868 }
2869
2870 /*
2871  * This function calcuates the T10 DIF guard tag
2872  * on the specified data using a CRC algorithmn
2873  * using crc_t10dif.
2874  */
2875 static uint16_t
2876 lpfc_bg_crc(uint8_t *data, int count)
2877 {
2878         uint16_t crc = 0;
2879         uint16_t x;
2880
2881         crc = crc_t10dif(data, count);
2882         x = cpu_to_be16(crc);
2883         return x;
2884 }
2885
2886 /*
2887  * This function calcuates the T10 DIF guard tag
2888  * on the specified data using a CSUM algorithmn
2889  * using ip_compute_csum.
2890  */
2891 static uint16_t
2892 lpfc_bg_csum(uint8_t *data, int count)
2893 {
2894         uint16_t ret;
2895
2896         ret = ip_compute_csum(data, count);
2897         return ret;
2898 }
2899
2900 /*
2901  * This function examines the protection data to try to determine
2902  * what type of T10-DIF error occurred.
2903  */
2904 static void
2905 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2906 {
2907         struct scatterlist *sgpe; /* s/g prot entry */
2908         struct scatterlist *sgde; /* s/g data entry */
2909         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2910         struct scsi_dif_tuple *src = NULL;
2911         uint8_t *data_src = NULL;
2912         uint16_t guard_tag;
2913         uint16_t start_app_tag, app_tag;
2914         uint32_t start_ref_tag, ref_tag;
2915         int prot, protsegcnt;
2916         int err_type, len, data_len;
2917         int chk_ref, chk_app, chk_guard;
2918         uint16_t sum;
2919         unsigned blksize;
2920
2921         err_type = BGS_GUARD_ERR_MASK;
2922         sum = 0;
2923         guard_tag = 0;
2924
2925         /* First check to see if there is protection data to examine */
2926         prot = scsi_get_prot_op(cmd);
2927         if ((prot == SCSI_PROT_READ_STRIP) ||
2928             (prot == SCSI_PROT_WRITE_INSERT) ||
2929             (prot == SCSI_PROT_NORMAL))
2930                 goto out;
2931
2932         /* Currently the driver just supports ref_tag and guard_tag checking */
2933         chk_ref = 1;
2934         chk_app = 0;
2935         chk_guard = 0;
2936
2937         /* Setup a ptr to the protection data provided by the SCSI host */
2938         sgpe = scsi_prot_sglist(cmd);
2939         protsegcnt = lpfc_cmd->prot_seg_cnt;
2940
2941         if (sgpe && protsegcnt) {
2942
2943                 /*
2944                  * We will only try to verify guard tag if the segment
2945                  * data length is a multiple of the blksize.
2946                  */
2947                 sgde = scsi_sglist(cmd);
2948                 blksize = lpfc_cmd_blksize(cmd);
2949                 data_src = (uint8_t *)sg_virt(sgde);
2950                 data_len = sgde->length;
2951                 if ((data_len & (blksize - 1)) == 0)
2952                         chk_guard = 1;
2953
2954                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2955                 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
2956                 start_app_tag = src->app_tag;
2957                 len = sgpe->length;
2958                 while (src && protsegcnt) {
2959                         while (len) {
2960
2961                                 /*
2962                                  * First check to see if a protection data
2963                                  * check is valid
2964                                  */
2965                                 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2966                                     (src->app_tag == T10_PI_APP_ESCAPE)) {
2967                                         start_ref_tag++;
2968                                         goto skipit;
2969                                 }
2970
2971                                 /* First Guard Tag checking */
2972                                 if (chk_guard) {
2973                                         guard_tag = src->guard_tag;
2974                                         if (lpfc_cmd_guard_csum(cmd))
2975                                                 sum = lpfc_bg_csum(data_src,
2976                                                                    blksize);
2977                                         else
2978                                                 sum = lpfc_bg_crc(data_src,
2979                                                                   blksize);
2980                                         if ((guard_tag != sum)) {
2981                                                 err_type = BGS_GUARD_ERR_MASK;
2982                                                 goto out;
2983                                         }
2984                                 }
2985
2986                                 /* Reference Tag checking */
2987                                 ref_tag = be32_to_cpu(src->ref_tag);
2988                                 if (chk_ref && (ref_tag != start_ref_tag)) {
2989                                         err_type = BGS_REFTAG_ERR_MASK;
2990                                         goto out;
2991                                 }
2992                                 start_ref_tag++;
2993
2994                                 /* App Tag checking */
2995                                 app_tag = src->app_tag;
2996                                 if (chk_app && (app_tag != start_app_tag)) {
2997                                         err_type = BGS_APPTAG_ERR_MASK;
2998                                         goto out;
2999                                 }
3000 skipit:
3001                                 len -= sizeof(struct scsi_dif_tuple);
3002                                 if (len < 0)
3003                                         len = 0;
3004                                 src++;
3005
3006                                 data_src += blksize;
3007                                 data_len -= blksize;
3008
3009                                 /*
3010                                  * Are we at the end of the Data segment?
3011                                  * The data segment is only used for Guard
3012                                  * tag checking.
3013                                  */
3014                                 if (chk_guard && (data_len == 0)) {
3015                                         chk_guard = 0;
3016                                         sgde = sg_next(sgde);
3017                                         if (!sgde)
3018                                                 goto out;
3019
3020                                         data_src = (uint8_t *)sg_virt(sgde);
3021                                         data_len = sgde->length;
3022                                         if ((data_len & (blksize - 1)) == 0)
3023                                                 chk_guard = 1;
3024                                 }
3025                         }
3026
3027                         /* Goto the next Protection data segment */
3028                         sgpe = sg_next(sgpe);
3029                         if (sgpe) {
3030                                 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3031                                 len = sgpe->length;
3032                         } else {
3033                                 src = NULL;
3034                         }
3035                         protsegcnt--;
3036                 }
3037         }
3038 out:
3039         if (err_type == BGS_GUARD_ERR_MASK) {
3040                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3041                                         0x10, 0x1);
3042                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3043                               SAM_STAT_CHECK_CONDITION;
3044                 phba->bg_guard_err_cnt++;
3045                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3046                                 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
3047                                 (unsigned long)scsi_get_lba(cmd),
3048                                 sum, guard_tag);
3049
3050         } else if (err_type == BGS_REFTAG_ERR_MASK) {
3051                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3052                                         0x10, 0x3);
3053                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3054                               SAM_STAT_CHECK_CONDITION;
3055
3056                 phba->bg_reftag_err_cnt++;
3057                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3058                                 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
3059                                 (unsigned long)scsi_get_lba(cmd),
3060                                 ref_tag, start_ref_tag);
3061
3062         } else if (err_type == BGS_APPTAG_ERR_MASK) {
3063                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3064                                         0x10, 0x2);
3065                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3066                               SAM_STAT_CHECK_CONDITION;
3067
3068                 phba->bg_apptag_err_cnt++;
3069                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3070                                 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
3071                                 (unsigned long)scsi_get_lba(cmd),
3072                                 app_tag, start_app_tag);
3073         }
3074 }
3075
3076
3077 /*
3078  * This function checks for BlockGuard errors detected by
3079  * the HBA.  In case of errors, the ASC/ASCQ fields in the
3080  * sense buffer will be set accordingly, paired with
3081  * ILLEGAL_REQUEST to signal to the kernel that the HBA
3082  * detected corruption.
3083  *
3084  * Returns:
3085  *  0 - No error found
3086  *  1 - BlockGuard error found
3087  * -1 - Internal error (bad profile, ...etc)
3088  */
3089 static int
3090 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
3091                         struct lpfc_iocbq *pIocbOut)
3092 {
3093         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3094         struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3095         int ret = 0;
3096         uint32_t bghm = bgf->bghm;
3097         uint32_t bgstat = bgf->bgstat;
3098         uint64_t failing_sector = 0;
3099
3100         spin_lock(&_dump_buf_lock);
3101         if (!_dump_buf_done) {
3102                 lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
3103                         " Data for %u blocks to debugfs\n",
3104                                 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3105                 lpfc_debug_save_data(phba, cmd);
3106
3107                 /* If we have a prot sgl, save the DIF buffer */
3108                 if (lpfc_prot_group_type(phba, cmd) ==
3109                                 LPFC_PG_TYPE_DIF_BUF) {
3110                         lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
3111                                 "Saving DIF for %u blocks to debugfs\n",
3112                                 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3113                         lpfc_debug_save_dif(phba, cmd);
3114                 }
3115
3116                 _dump_buf_done = 1;
3117         }
3118         spin_unlock(&_dump_buf_lock);
3119
3120         if (lpfc_bgs_get_invalid_prof(bgstat)) {
3121                 cmd->result = DID_ERROR << 16;
3122                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3123                                 "9072 BLKGRD: Invalid BG Profile in cmd"
3124                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3125                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3126                                 (unsigned long long)scsi_get_lba(cmd),
3127                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3128                 ret = (-1);
3129                 goto out;
3130         }
3131
3132         if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3133                 cmd->result = DID_ERROR << 16;
3134                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3135                                 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
3136                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3137                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3138                                 (unsigned long long)scsi_get_lba(cmd),
3139                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3140                 ret = (-1);
3141                 goto out;
3142         }
3143
3144         if (lpfc_bgs_get_guard_err(bgstat)) {
3145                 ret = 1;
3146
3147                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3148                                 0x10, 0x1);
3149                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3150                               SAM_STAT_CHECK_CONDITION;
3151                 phba->bg_guard_err_cnt++;
3152                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3153                                 "9055 BLKGRD: Guard Tag error in cmd"
3154                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3155                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3156                                 (unsigned long long)scsi_get_lba(cmd),
3157                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3158         }
3159
3160         if (lpfc_bgs_get_reftag_err(bgstat)) {
3161                 ret = 1;
3162
3163                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3164                                 0x10, 0x3);
3165                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3166                               SAM_STAT_CHECK_CONDITION;
3167
3168                 phba->bg_reftag_err_cnt++;
3169                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3170                                 "9056 BLKGRD: Ref Tag error in cmd"
3171                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3172                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3173                                 (unsigned long long)scsi_get_lba(cmd),
3174                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3175         }
3176
3177         if (lpfc_bgs_get_apptag_err(bgstat)) {
3178                 ret = 1;
3179
3180                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3181                                 0x10, 0x2);
3182                 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
3183                               SAM_STAT_CHECK_CONDITION;
3184
3185                 phba->bg_apptag_err_cnt++;
3186                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3187                                 "9061 BLKGRD: App Tag error in cmd"
3188                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3189                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3190                                 (unsigned long long)scsi_get_lba(cmd),
3191                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3192         }
3193
3194         if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3195                 /*
3196                  * setup sense data descriptor 0 per SPC-4 as an information
3197                  * field, and put the failing LBA in it.
3198                  * This code assumes there was also a guard/app/ref tag error
3199                  * indication.
3200                  */
3201                 cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3202                 cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3203                 cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3204                 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3205
3206                 /* bghm is a "on the wire" FC frame based count */
3207                 switch (scsi_get_prot_op(cmd)) {
3208                 case SCSI_PROT_READ_INSERT:
3209                 case SCSI_PROT_WRITE_STRIP:
3210                         bghm /= cmd->device->sector_size;
3211                         break;
3212                 case SCSI_PROT_READ_STRIP:
3213                 case SCSI_PROT_WRITE_INSERT:
3214                 case SCSI_PROT_READ_PASS:
3215                 case SCSI_PROT_WRITE_PASS:
3216                         bghm /= (cmd->device->sector_size +
3217                                 sizeof(struct scsi_dif_tuple));
3218                         break;
3219                 }
3220
3221                 failing_sector = scsi_get_lba(cmd);
3222                 failing_sector += bghm;
3223
3224                 /* Descriptor Information */
3225                 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3226         }
3227
3228         if (!ret) {
3229                 /* No error was reported - problem in FW? */
3230                 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3231                                 "9057 BLKGRD: Unknown error in cmd"
3232                                 " 0x%x lba 0x%llx blk cnt 0x%x "
3233                                 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3234                                 (unsigned long long)scsi_get_lba(cmd),
3235                                 blk_rq_sectors(cmd->request), bgstat, bghm);
3236
3237                 /* Calcuate what type of error it was */
3238                 lpfc_calc_bg_err(phba, lpfc_cmd);
3239         }
3240 out:
3241         return ret;
3242 }
3243
3244 /**
3245  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3246  * @phba: The Hba for which this call is being executed.
3247  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3248  *
3249  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3250  * field of @lpfc_cmd for device with SLI-4 interface spec.
3251  *
3252  * Return codes:
3253  *      1 - Error
3254  *      0 - Success
3255  **/
3256 static int
3257 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3258 {
3259         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3260         struct scatterlist *sgel = NULL;
3261         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3262         struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
3263         struct sli4_sge *first_data_sgl;
3264         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3265         dma_addr_t physaddr;
3266         uint32_t num_bde = 0;
3267         uint32_t dma_len;
3268         uint32_t dma_offset = 0;
3269         int nseg;
3270         struct ulp_bde64 *bde;
3271
3272         /*
3273          * There are three possibilities here - use scatter-gather segment, use
3274          * the single mapping, or neither.  Start the lpfc command prep by
3275          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3276          * data bde entry.
3277          */
3278         if (scsi_sg_count(scsi_cmnd)) {
3279                 /*
3280                  * The driver stores the segment count returned from pci_map_sg
3281                  * because this a count of dma-mappings used to map the use_sg
3282                  * pages.  They are not guaranteed to be the same for those
3283                  * architectures that implement an IOMMU.
3284                  */
3285
3286                 nseg = scsi_dma_map(scsi_cmnd);
3287                 if (unlikely(nseg <= 0))
3288                         return 1;
3289                 sgl += 1;
3290                 /* clear the last flag in the fcp_rsp map entry */
3291                 sgl->word2 = le32_to_cpu(sgl->word2);
3292                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3293                 sgl->word2 = cpu_to_le32(sgl->word2);
3294                 sgl += 1;
3295                 first_data_sgl = sgl;
3296                 lpfc_cmd->seg_cnt = nseg;
3297                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3298                         lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3299                                 " %s: Too many sg segments from "
3300                                 "dma_map_sg.  Config %d, seg_cnt %d\n",
3301                                 __func__, phba->cfg_sg_seg_cnt,
3302                                lpfc_cmd->seg_cnt);
3303                         lpfc_cmd->seg_cnt = 0;
3304                         scsi_dma_unmap(scsi_cmnd);
3305                         return 1;
3306                 }
3307
3308                 /*
3309                  * The driver established a maximum scatter-gather segment count
3310                  * during probe that limits the number of sg elements in any
3311                  * single scsi command.  Just run through the seg_cnt and format
3312                  * the sge's.
3313                  * When using SLI-3 the driver will try to fit all the BDEs into
3314                  * the IOCB. If it can't then the BDEs get added to a BPL as it
3315                  * does for SLI-2 mode.
3316                  */
3317                 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3318                         physaddr = sg_dma_address(sgel);
3319                         dma_len = sg_dma_len(sgel);
3320                         sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3321                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
3322                         sgl->word2 = le32_to_cpu(sgl->word2);
3323                         if ((num_bde + 1) == nseg)
3324                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
3325                         else
3326                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3327                         bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3328                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3329                         sgl->word2 = cpu_to_le32(sgl->word2);
3330                         sgl->sge_len = cpu_to_le32(dma_len);
3331                         dma_offset += dma_len;
3332                         sgl++;
3333                 }
3334                 /*
3335                  * Setup the first Payload BDE. For FCoE we just key off
3336                  * Performance Hints, for FC we use lpfc_enable_pbde.
3337                  * We populate words 13-15 of IOCB/WQE.
3338                  */
3339                 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3340                     phba->cfg_enable_pbde) {
3341                         bde = (struct ulp_bde64 *)
3342                                 &(iocb_cmd->unsli3.sli3Words[5]);
3343                         bde->addrLow = first_data_sgl->addr_lo;
3344                         bde->addrHigh = first_data_sgl->addr_hi;
3345                         bde->tus.f.bdeSize =
3346                                         le32_to_cpu(first_data_sgl->sge_len);
3347                         bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3348                         bde->tus.w = cpu_to_le32(bde->tus.w);
3349                 }
3350         } else {
3351                 sgl += 1;
3352                 /* clear the last flag in the fcp_rsp map entry */
3353                 sgl->word2 = le32_to_cpu(sgl->word2);
3354                 bf_set(lpfc_sli4_sge_last, sgl, 1);
3355                 sgl->word2 = cpu_to_le32(sgl->word2);
3356
3357                 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3358                     phba->cfg_enable_pbde) {
3359                         bde = (struct ulp_bde64 *)
3360                                 &(iocb_cmd->unsli3.sli3Words[5]);
3361                         memset(bde, 0, (sizeof(uint32_t) * 3));
3362                 }
3363         }
3364
3365         /*
3366          * Finish initializing those IOCB fields that are dependent on the
3367          * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3368          * explicitly reinitialized.
3369          * all iocb memory resources are reused.
3370          */
3371         fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3372
3373         /*
3374          * Due to difference in data length between DIF/non-DIF paths,
3375          * we need to set word 4 of IOCB here
3376          */
3377         iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3378
3379         /*
3380          * If the OAS driver feature is enabled and the lun is enabled for
3381          * OAS, set the oas iocb related flags.
3382          */
3383         if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3384                 scsi_cmnd->device->hostdata)->oas_enabled) {
3385                 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3386                 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3387                         scsi_cmnd->device->hostdata)->priority;
3388         }
3389         return 0;
3390 }
3391
3392 /**
3393  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3394  * @phba: The Hba for which this call is being executed.
3395  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3396  *
3397  * This is the protection/DIF aware version of
3398  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3399  * two functions eventually, but for now, it's here
3400  **/
3401 static int
3402 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3403                 struct lpfc_scsi_buf *lpfc_cmd)
3404 {
3405         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3406         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3407         struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3408         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3409         uint32_t num_sge = 0;
3410         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3411         int prot_group_type = 0;
3412         int fcpdl;
3413         struct lpfc_vport *vport = phba->pport;
3414
3415         /*
3416          * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3417          *  fcp_rsp regions to the first data sge entry
3418          */
3419         if (scsi_sg_count(scsi_cmnd)) {
3420                 /*
3421                  * The driver stores the segment count returned from pci_map_sg
3422                  * because this a count of dma-mappings used to map the use_sg
3423                  * pages.  They are not guaranteed to be the same for those
3424                  * architectures that implement an IOMMU.
3425                  */
3426                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3427                                         scsi_sglist(scsi_cmnd),
3428                                         scsi_sg_count(scsi_cmnd), datadir);
3429                 if (unlikely(!datasegcnt))
3430                         return 1;
3431
3432                 sgl += 1;
3433                 /* clear the last flag in the fcp_rsp map entry */
3434                 sgl->word2 = le32_to_cpu(sgl->word2);
3435                 bf_set(lpfc_sli4_sge_last, sgl, 0);
3436                 sgl->word2 = cpu_to_le32(sgl->word2);
3437
3438                 sgl += 1;
3439                 lpfc_cmd->seg_cnt = datasegcnt;
3440
3441                 /* First check if data segment count from SCSI Layer is good */
3442                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3443                         goto err;
3444
3445                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3446
3447                 switch (prot_group_type) {
3448                 case LPFC_PG_TYPE_NO_DIF:
3449                         /* Here we need to add a DISEED to the count */
3450                         if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3451                                 goto err;
3452
3453                         num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3454                                         datasegcnt);
3455
3456                         /* we should have 2 or more entries in buffer list */
3457                         if (num_sge < 2)
3458                                 goto err;
3459                         break;
3460
3461                 case LPFC_PG_TYPE_DIF_BUF:
3462                         /*
3463                          * This type indicates that protection buffers are
3464                          * passed to the driver, so that needs to be prepared
3465                          * for DMA
3466                          */
3467                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
3468                                         scsi_prot_sglist(scsi_cmnd),
3469                                         scsi_prot_sg_count(scsi_cmnd), datadir);
3470                         if (unlikely(!protsegcnt)) {
3471                                 scsi_dma_unmap(scsi_cmnd);
3472                                 return 1;
3473                         }
3474
3475                         lpfc_cmd->prot_seg_cnt = protsegcnt;
3476                         /*
3477                          * There is a minimun of 3 SGEs used for every
3478                          * protection data segment.
3479                          */
3480                         if ((lpfc_cmd->prot_seg_cnt * 3) >
3481                             (phba->cfg_total_seg_cnt - 2))
3482                                 goto err;
3483
3484                         num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3485                                         datasegcnt, protsegcnt);
3486
3487                         /* we should have 3 or more entries in buffer list */
3488                         if ((num_sge < 3) ||
3489                             (num_sge > phba->cfg_total_seg_cnt))
3490                                 goto err;
3491                         break;
3492
3493                 case LPFC_PG_TYPE_INVALID:
3494                 default:
3495                         scsi_dma_unmap(scsi_cmnd);
3496                         lpfc_cmd->seg_cnt = 0;
3497
3498                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3499                                         "9083 Unexpected protection group %i\n",
3500                                         prot_group_type);
3501                         return 1;
3502                 }
3503         }
3504
3505         switch (scsi_get_prot_op(scsi_cmnd)) {
3506         case SCSI_PROT_WRITE_STRIP:
3507         case SCSI_PROT_READ_STRIP:
3508                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3509                 break;
3510         case SCSI_PROT_WRITE_INSERT:
3511         case SCSI_PROT_READ_INSERT:
3512                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3513                 break;
3514         case SCSI_PROT_WRITE_PASS:
3515         case SCSI_PROT_READ_PASS:
3516                 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3517                 break;
3518         }
3519
3520         fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3521         fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3522
3523         /*
3524          * Due to difference in data length between DIF/non-DIF paths,
3525          * we need to set word 4 of IOCB here
3526          */
3527         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3528
3529         /*
3530          * For First burst, we may need to adjust the initial transfer
3531          * length for DIF
3532          */
3533         if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3534             (fcpdl < vport->cfg_first_burst_size))
3535                 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3536
3537         /*
3538          * If the OAS driver feature is enabled and the lun is enabled for
3539          * OAS, set the oas iocb related flags.
3540          */
3541         if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3542                 scsi_cmnd->device->hostdata)->oas_enabled)
3543                 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3544
3545         return 0;
3546 err:
3547         if (lpfc_cmd->seg_cnt)
3548                 scsi_dma_unmap(scsi_cmnd);
3549         if (lpfc_cmd->prot_seg_cnt)
3550                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3551                              scsi_prot_sg_count(scsi_cmnd),
3552                              scsi_cmnd->sc_data_direction);
3553
3554         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3555                         "9084 Cannot setup S/G List for HBA"
3556                         "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3557                         lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3558                         phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3559                         prot_group_type, num_sge);
3560
3561         lpfc_cmd->seg_cnt = 0;
3562         lpfc_cmd->prot_seg_cnt = 0;
3563         return 1;
3564 }
3565
3566 /**
3567  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3568  * @phba: The Hba for which this call is being executed.
3569  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3570  *
3571  * This routine wraps the actual DMA mapping function pointer from the
3572  * lpfc_hba struct.
3573  *
3574  * Return codes:
3575  *      1 - Error
3576  *      0 - Success
3577  **/
3578 static inline int
3579 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3580 {
3581         return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3582 }
3583
3584 /**
3585  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3586  * using BlockGuard.
3587  * @phba: The Hba for which this call is being executed.
3588  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3589  *
3590  * This routine wraps the actual DMA mapping function pointer from the
3591  * lpfc_hba struct.
3592  *
3593  * Return codes:
3594  *      1 - Error
3595  *      0 - Success
3596  **/
3597 static inline int
3598 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3599 {
3600         return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3601 }
3602
3603 /**
3604  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3605  * @phba: Pointer to hba context object.
3606  * @vport: Pointer to vport object.
3607  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3608  * @rsp_iocb: Pointer to response iocb object which reported error.
3609  *
3610  * This function posts an event when there is a SCSI command reporting
3611  * error from the scsi device.
3612  **/
3613 static void
3614 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3615                 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3616         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3617         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3618         uint32_t resp_info = fcprsp->rspStatus2;
3619         uint32_t scsi_status = fcprsp->rspStatus3;
3620         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3621         struct lpfc_fast_path_event *fast_path_evt = NULL;
3622         struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3623         unsigned long flags;
3624
3625         if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3626                 return;
3627
3628         /* If there is queuefull or busy condition send a scsi event */
3629         if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3630                 (cmnd->result == SAM_STAT_BUSY)) {
3631                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3632                 if (!fast_path_evt)
3633                         return;
3634                 fast_path_evt->un.scsi_evt.event_type =
3635                         FC_REG_SCSI_EVENT;
3636                 fast_path_evt->un.scsi_evt.subcategory =
3637                 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3638                 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3639                 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3640                 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3641                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3642                 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3643                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3644         } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3645                 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3646                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3647                 if (!fast_path_evt)
3648                         return;
3649                 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3650                         FC_REG_SCSI_EVENT;
3651                 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3652                         LPFC_EVENT_CHECK_COND;
3653                 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3654                         cmnd->device->lun;
3655                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3656                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3657                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3658                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3659                 fast_path_evt->un.check_cond_evt.sense_key =
3660                         cmnd->sense_buffer[2] & 0xf;
3661                 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3662                 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3663         } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3664                      fcpi_parm &&
3665                      ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3666                         ((scsi_status == SAM_STAT_GOOD) &&
3667                         !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3668                 /*
3669                  * If status is good or resid does not match with fcp_param and
3670                  * there is valid fcpi_parm, then there is a read_check error
3671                  */
3672                 fast_path_evt = lpfc_alloc_fast_evt(phba);
3673                 if (!fast_path_evt)
3674                         return;
3675                 fast_path_evt->un.read_check_error.header.event_type =
3676                         FC_REG_FABRIC_EVENT;
3677                 fast_path_evt->un.read_check_error.header.subcategory =
3678                         LPFC_EVENT_FCPRDCHKERR;
3679                 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3680                         &pnode->nlp_portname, sizeof(struct lpfc_name));
3681                 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3682                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
3683                 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3684                 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3685                 fast_path_evt->un.read_check_error.fcpiparam =
3686                         fcpi_parm;
3687         } else
3688                 return;
3689
3690         fast_path_evt->vport = vport;
3691         spin_lock_irqsave(&phba->hbalock, flags);
3692         list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3693         spin_unlock_irqrestore(&phba->hbalock, flags);
3694         lpfc_worker_wake_up(phba);
3695         return;
3696 }
3697
3698 /**
3699  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3700  * @phba: The HBA for which this call is being executed.
3701  * @psb: The scsi buffer which is going to be un-mapped.
3702  *
3703  * This routine does DMA un-mapping of scatter gather list of scsi command
3704  * field of @lpfc_cmd for device with SLI-3 interface spec.
3705  **/
3706 static void
3707 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3708 {
3709         /*
3710          * There are only two special cases to consider.  (1) the scsi command
3711          * requested scatter-gather usage or (2) the scsi command allocated
3712          * a request buffer, but did not request use_sg.  There is a third
3713          * case, but it does not require resource deallocation.
3714          */
3715         if (psb->seg_cnt > 0)
3716                 scsi_dma_unmap(psb->pCmd);
3717         if (psb->prot_seg_cnt > 0)
3718                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3719                                 scsi_prot_sg_count(psb->pCmd),
3720                                 psb->pCmd->sc_data_direction);
3721 }
3722
3723 /**
3724  * lpfc_handler_fcp_err - FCP response handler
3725  * @vport: The virtual port for which this call is being executed.
3726  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3727  * @rsp_iocb: The response IOCB which contains FCP error.
3728  *
3729  * This routine is called to process response IOCB with status field
3730  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3731  * based upon SCSI and FCP error.
3732  **/
3733 static void
3734 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3735                     struct lpfc_iocbq *rsp_iocb)
3736 {
3737         struct lpfc_hba *phba = vport->phba;
3738         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3739         struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3740         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3741         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3742         uint32_t resp_info = fcprsp->rspStatus2;
3743         uint32_t scsi_status = fcprsp->rspStatus3;
3744         uint32_t *lp;
3745         uint32_t host_status = DID_OK;
3746         uint32_t rsplen = 0;
3747         uint32_t fcpDl;
3748         uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3749
3750
3751         /*
3752          *  If this is a task management command, there is no
3753          *  scsi packet associated with this lpfc_cmd.  The driver
3754          *  consumes it.
3755          */
3756         if (fcpcmd->fcpCntl2) {
3757                 scsi_status = 0;
3758                 goto out;
3759         }
3760
3761         if (resp_info & RSP_LEN_VALID) {
3762                 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3763                 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3764                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3765                                  "2719 Invalid response length: "
3766                                  "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
3767                                  cmnd->device->id,
3768                                  cmnd->device->lun, cmnd->cmnd[0],
3769                                  rsplen);
3770                         host_status = DID_ERROR;
3771                         goto out;
3772                 }
3773                 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3774                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3775                                  "2757 Protocol failure detected during "
3776                                  "processing of FCP I/O op: "
3777                                  "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3778                                  cmnd->device->id,
3779                                  cmnd->device->lun, cmnd->cmnd[0],
3780                                  fcprsp->rspInfo3);
3781                         host_status = DID_ERROR;
3782                         goto out;
3783                 }
3784         }
3785
3786         if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3787                 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3788                 if (snslen > SCSI_SENSE_BUFFERSIZE)
3789                         snslen = SCSI_SENSE_BUFFERSIZE;
3790
3791                 if (resp_info & RSP_LEN_VALID)
3792                   rsplen = be32_to_cpu(fcprsp->rspRspLen);
3793                 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3794         }
3795         lp = (uint32_t *)cmnd->sense_buffer;
3796
3797         /* special handling for under run conditions */
3798         if (!scsi_status && (resp_info & RESID_UNDER)) {
3799                 /* don't log under runs if fcp set... */
3800                 if (vport->cfg_log_verbose & LOG_FCP)
3801                         logit = LOG_FCP_ERROR;
3802                 /* unless operator says so */
3803                 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3804                         logit = LOG_FCP_UNDER;
3805         }
3806
3807         lpfc_printf_vlog(vport, KERN_WARNING, logit,
3808                          "9024 FCP command x%x failed: x%x SNS x%x x%x "
3809                          "Data: x%x x%x x%x x%x x%x\n",
3810                          cmnd->cmnd[0], scsi_status,
3811                          be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3812                          be32_to_cpu(fcprsp->rspResId),
3813                          be32_to_cpu(fcprsp->rspSnsLen),
3814                          be32_to_cpu(fcprsp->rspRspLen),
3815                          fcprsp->rspInfo3);
3816
3817         scsi_set_resid(cmnd, 0);
3818         fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3819         if (resp_info & RESID_UNDER) {
3820                 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3821
3822                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3823                                  "9025 FCP Underrun, expected %d, "
3824                                  "residual %d Data: x%x x%x x%x\n",
3825                                  fcpDl,
3826                                  scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3827                                  cmnd->underflow);
3828
3829                 /*
3830                  * If there is an under run, check if under run reported by
3831                  * storage array is same as the under run reported by HBA.
3832                  * If this is not same, there is a dropped frame.
3833                  */
3834                 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3835                         lpfc_printf_vlog(vport, KERN_WARNING,
3836                                          LOG_FCP | LOG_FCP_ERROR,
3837                                          "9026 FCP Read Check Error "
3838                                          "and Underrun Data: x%x x%x x%x x%x\n",
3839                                          fcpDl,
3840                                          scsi_get_resid(cmnd), fcpi_parm,
3841                                          cmnd->cmnd[0]);
3842                         scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3843                         host_status = DID_ERROR;
3844                 }
3845                 /*
3846                  * The cmnd->underflow is the minimum number of bytes that must
3847                  * be transferred for this command.  Provided a sense condition
3848                  * is not present, make sure the actual amount transferred is at
3849                  * least the underflow value or fail.
3850                  */
3851                 if (!(resp_info & SNS_LEN_VALID) &&
3852                     (scsi_status == SAM_STAT_GOOD) &&
3853                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3854                      < cmnd->underflow)) {
3855                         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3856                                          "9027 FCP command x%x residual "
3857                                          "underrun converted to error "
3858                                          "Data: x%x x%x x%x\n",
3859                                          cmnd->cmnd[0], scsi_bufflen(cmnd),
3860                                          scsi_get_resid(cmnd), cmnd->underflow);
3861                         host_status = DID_ERROR;
3862                 }
3863         } else if (resp_info & RESID_OVER) {
3864                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3865                                  "9028 FCP command x%x residual overrun error. "
3866                                  "Data: x%x x%x\n", cmnd->cmnd[0],
3867                                  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3868                 host_status = DID_ERROR;
3869
3870         /*
3871          * Check SLI validation that all the transfer was actually done
3872          * (fcpi_parm should be zero). Apply check only to reads.
3873          */
3874         } else if (fcpi_parm) {
3875                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3876                                  "9029 FCP %s Check Error xri x%x  Data: "
3877                                  "x%x x%x x%x x%x x%x\n",
3878                                  ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3879                                  "Read" : "Write"),
3880                                  ((phba->sli_rev == LPFC_SLI_REV4) ?
3881                                  lpfc_cmd->cur_iocbq.sli4_xritag :
3882                                  rsp_iocb->iocb.ulpContext),
3883                                  fcpDl, be32_to_cpu(fcprsp->rspResId),
3884                                  fcpi_parm, cmnd->cmnd[0], scsi_status);
3885
3886                 /* There is some issue with the LPe12000 that causes it
3887                  * to miscalculate the fcpi_parm and falsely trip this
3888                  * recovery logic.  Detect this case and don't error when true.
3889                  */
3890                 if (fcpi_parm > fcpDl)
3891                         goto out;
3892
3893                 switch (scsi_status) {
3894                 case SAM_STAT_GOOD:
3895                 case SAM_STAT_CHECK_CONDITION:
3896                         /* Fabric dropped a data frame. Fail any successful
3897                          * command in which we detected dropped frames.
3898                          * A status of good or some check conditions could
3899                          * be considered a successful command.
3900                          */
3901                         host_status = DID_ERROR;
3902                         break;
3903                 }
3904                 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3905         }
3906
3907  out:
3908         cmnd->result = host_status << 16 | scsi_status;
3909         lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3910 }
3911
3912 /**
3913  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
3914  * @phba: Pointer to HBA context object.
3915  *
3916  * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
3917  * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
3918  * held.
3919  * If scsi-mq is enabled, get the default block layer mapping of software queues
3920  * to hardware queues. This information is saved in request tag.
3921  *
3922  * Return: index into SLI4 fast-path FCP queue index.
3923  **/
3924 int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
3925                                   struct lpfc_scsi_buf *lpfc_cmd)
3926 {
3927         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3928         struct lpfc_vector_map_info *cpup;
3929         int chann, cpu;
3930         uint32_t tag;
3931         uint16_t hwq;
3932
3933         if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
3934                 tag = blk_mq_unique_tag(cmnd->request);
3935                 hwq = blk_mq_unique_tag_to_hwq(tag);
3936
3937                 return hwq;
3938         }
3939
3940         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
3941             && phba->cfg_fcp_io_channel > 1) {
3942                 cpu = smp_processor_id();
3943                 if (cpu < phba->sli4_hba.num_present_cpu) {
3944                         cpup = phba->sli4_hba.cpu_map;
3945                         cpup += cpu;
3946                         return cpup->channel_id;
3947                 }
3948         }
3949         chann = atomic_add_return(1, &phba->fcp_qidx);
3950         chann = chann % phba->cfg_fcp_io_channel;
3951         return chann;
3952 }
3953
3954
3955 /**
3956  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3957  * @phba: The Hba for which this call is being executed.
3958  * @pIocbIn: The command IOCBQ for the scsi cmnd.
3959  * @pIocbOut: The response IOCBQ for the scsi cmnd.
3960  *
3961  * This routine assigns scsi command result by looking into response IOCB
3962  * status field appropriately. This routine handles QUEUE FULL condition as
3963  * well by ramping down device queue depth.
3964  **/
3965 static void
3966 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3967                         struct lpfc_iocbq *pIocbOut)
3968 {
3969         struct lpfc_scsi_buf *lpfc_cmd =
3970                 (struct lpfc_scsi_buf *) pIocbIn->context1;
3971         struct lpfc_vport      *vport = pIocbIn->vport;
3972         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3973         struct lpfc_nodelist *pnode = rdata->pnode;
3974         struct scsi_cmnd *cmd;
3975         unsigned long flags;
3976         struct lpfc_fast_path_event *fast_path_evt;
3977         struct Scsi_Host *shost;
3978         uint32_t logit = LOG_FCP;
3979
3980         atomic_inc(&phba->fc4ScsiIoCmpls);
3981
3982         /* Sanity check on return of outstanding command */
3983         cmd = lpfc_cmd->pCmd;
3984         if (!cmd)
3985                 return;
3986         shost = cmd->device->host;
3987
3988         lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3989         lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3990         /* pick up SLI4 exhange busy status from HBA */
3991         lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3992
3993 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3994         if (lpfc_cmd->prot_data_type) {
3995                 struct scsi_dif_tuple *src = NULL;
3996
3997                 src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3998                 /*
3999                  * Used to restore any changes to protection
4000                  * data for error injection.
4001                  */
4002                 switch (lpfc_cmd->prot_data_type) {
4003                 case LPFC_INJERR_REFTAG:
4004                         src->ref_tag =
4005                                 lpfc_cmd->prot_data;
4006                         break;
4007                 case LPFC_INJERR_APPTAG:
4008                         src->app_tag =
4009                                 (uint16_t)lpfc_cmd->prot_data;
4010                         break;
4011                 case LPFC_INJERR_GUARD:
4012                         src->guard_tag =
4013                                 (uint16_t)lpfc_cmd->prot_data;
4014                         break;
4015                 default:
4016                         break;
4017                 }
4018
4019                 lpfc_cmd->prot_data = 0;
4020                 lpfc_cmd->prot_data_type = 0;
4021                 lpfc_cmd->prot_data_segment = NULL;
4022         }
4023 #endif
4024
4025         if (lpfc_cmd->status) {
4026                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4027                     (lpfc_cmd->result & IOERR_DRVR_MASK))
4028                         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4029                 else if (lpfc_cmd->status >= IOSTAT_CNT)
4030                         lpfc_cmd->status = IOSTAT_DEFAULT;
4031                 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4032                     !lpfc_cmd->fcp_rsp->rspStatus3 &&
4033                     (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4034                     !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4035                         logit = 0;
4036                 else
4037                         logit = LOG_FCP | LOG_FCP_UNDER;
4038                 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4039                          "9030 FCP cmd x%x failed <%d/%lld> "
4040                          "status: x%x result: x%x "
4041                          "sid: x%x did: x%x oxid: x%x "
4042                          "Data: x%x x%x\n",
4043                          cmd->cmnd[0],
4044                          cmd->device ? cmd->device->id : 0xffff,
4045                          cmd->device ? cmd->device->lun : 0xffff,
4046                          lpfc_cmd->status, lpfc_cmd->result,
4047                          vport->fc_myDID,
4048                          (pnode) ? pnode->nlp_DID : 0,
4049                          phba->sli_rev == LPFC_SLI_REV4 ?
4050                              lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4051                          pIocbOut->iocb.ulpContext,
4052                          lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4053
4054                 switch (lpfc_cmd->status) {
4055                 case IOSTAT_FCP_RSP_ERROR:
4056                         /* Call FCP RSP handler to determine result */
4057                         lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
4058                         break;
4059                 case IOSTAT_NPORT_BSY:
4060                 case IOSTAT_FABRIC_BSY:
4061                         cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4062                         fast_path_evt = lpfc_alloc_fast_evt(phba);
4063                         if (!fast_path_evt)
4064                                 break;
4065                         fast_path_evt->un.fabric_evt.event_type =
4066                                 FC_REG_FABRIC_EVENT;
4067                         fast_path_evt->un.fabric_evt.subcategory =
4068                                 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4069                                 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4070                         if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4071                                 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4072                                         &pnode->nlp_portname,
4073                                         sizeof(struct lpfc_name));
4074                                 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4075                                         &pnode->nlp_nodename,
4076                                         sizeof(struct lpfc_name));
4077                         }
4078                         fast_path_evt->vport = vport;
4079                         fast_path_evt->work_evt.evt =
4080                                 LPFC_EVT_FASTPATH_MGMT_EVT;
4081                         spin_lock_irqsave(&phba->hbalock, flags);
4082                         list_add_tail(&fast_path_evt->work_evt.evt_listp,
4083                                 &phba->work_list);
4084                         spin_unlock_irqrestore(&phba->hbalock, flags);
4085                         lpfc_worker_wake_up(phba);
4086                         break;
4087                 case IOSTAT_LOCAL_REJECT:
4088                 case IOSTAT_REMOTE_STOP:
4089                         if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4090                             lpfc_cmd->result ==
4091                                         IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4092                             lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4093                             lpfc_cmd->result ==
4094                                         IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4095                                 cmd->result = DID_NO_CONNECT << 16;
4096                                 break;
4097                         }
4098                         if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4099                             lpfc_cmd->result == IOERR_NO_RESOURCES ||
4100                             lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4101                             lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4102                                 cmd->result = DID_REQUEUE << 16;
4103                                 break;
4104                         }
4105                         if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4106                              lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4107                              pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4108                                 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4109                                         /*
4110                                          * This is a response for a BG enabled
4111                                          * cmd. Parse BG error
4112                                          */
4113                                         lpfc_parse_bg_err(phba, lpfc_cmd,
4114                                                         pIocbOut);
4115                                         break;
4116                                 } else {
4117                                         lpfc_printf_vlog(vport, KERN_WARNING,
4118                                                         LOG_BG,
4119                                                         "9031 non-zero BGSTAT "
4120                                                         "on unprotected cmd\n");
4121                                 }
4122                         }
4123                         if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4124                                 && (phba->sli_rev == LPFC_SLI_REV4)
4125                                 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
4126                                 /* This IO was aborted by the target, we don't
4127                                  * know the rxid and because we did not send the
4128                                  * ABTS we cannot generate and RRQ.
4129                                  */
4130                                 lpfc_set_rrq_active(phba, pnode,
4131                                         lpfc_cmd->cur_iocbq.sli4_lxritag,
4132                                         0, 0);
4133                         }
4134                 /* else: fall through */
4135                 default:
4136                         cmd->result = DID_ERROR << 16;
4137                         break;
4138                 }
4139
4140                 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
4141                     || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4142                         cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4143                                       SAM_STAT_BUSY;
4144         } else
4145                 cmd->result = DID_OK << 16;
4146
4147         if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4148                 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4149
4150                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4151                                  "0710 Iodone <%d/%llu> cmd %p, error "
4152                                  "x%x SNS x%x x%x Data: x%x x%x\n",
4153                                  cmd->device->id, cmd->device->lun, cmd,
4154                                  cmd->result, *lp, *(lp + 3), cmd->retries,
4155                                  scsi_get_resid(cmd));
4156         }
4157
4158         lpfc_update_stats(phba, lpfc_cmd);
4159         if (vport->cfg_max_scsicmpl_time &&
4160            time_after(jiffies, lpfc_cmd->start_time +
4161                 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4162                 spin_lock_irqsave(shost->host_lock, flags);
4163                 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4164                         if (pnode->cmd_qdepth >
4165                                 atomic_read(&pnode->cmd_pending) &&
4166                                 (atomic_read(&pnode->cmd_pending) >
4167                                 LPFC_MIN_TGT_QDEPTH) &&
4168                                 ((cmd->cmnd[0] == READ_10) ||
4169                                 (cmd->cmnd[0] == WRITE_10)))
4170                                 pnode->cmd_qdepth =
4171                                         atomic_read(&pnode->cmd_pending);
4172
4173                         pnode->last_change_time = jiffies;
4174                 }
4175                 spin_unlock_irqrestore(shost->host_lock, flags);
4176         }
4177         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4178
4179         /* If pCmd was set to NULL from abort path, do not call scsi_done */
4180         if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
4181                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4182                                  "5688 FCP cmd already NULL, sid: 0x%06x, "
4183                                  "did: 0x%06x, oxid: 0x%04x\n",
4184                                  vport->fc_myDID,
4185                                  (pnode) ? pnode->nlp_DID : 0,
4186                                  phba->sli_rev == LPFC_SLI_REV4 ?
4187                                  lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
4188                 return;
4189         }
4190
4191         /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4192         cmd->scsi_done(cmd);
4193
4194         /*
4195          * If there is a thread waiting for command completion
4196          * wake up the thread.
4197          */
4198         spin_lock_irqsave(shost->host_lock, flags);
4199         if (lpfc_cmd->waitq)
4200                 wake_up(lpfc_cmd->waitq);
4201         spin_unlock_irqrestore(shost->host_lock, flags);
4202
4203         lpfc_release_scsi_buf(phba, lpfc_cmd);
4204 }
4205
4206 /**
4207  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4208  * @data: A pointer to the immediate command data portion of the IOCB.
4209  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4210  *
4211  * The routine copies the entire FCP command from @fcp_cmnd to @data while
4212  * byte swapping the data to big endian format for transmission on the wire.
4213  **/
4214 static void
4215 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4216 {
4217         int i, j;
4218         for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4219              i += sizeof(uint32_t), j++) {
4220                 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4221         }
4222 }
4223
4224 /**
4225  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
4226  * @vport: The virtual port for which this call is being executed.
4227  * @lpfc_cmd: The scsi command which needs to send.
4228  * @pnode: Pointer to lpfc_nodelist.
4229  *
4230  * This routine initializes fcp_cmnd and iocb data structure from scsi command
4231  * to transfer for device with SLI3 interface spec.
4232  **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	/* Nothing to prepare if the remote node is gone or inactive. */
	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	/* Copy the CDB in and zero-pad the rest of the fixed CDB area. */
	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			/* If first burst is enabled for this node, seed
			 * fcpi_XRdy with the transfer length capped at the
			 * configured first-burst size.
			 */
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			atomic_inc(&phba->fc4ScsiOutputRequests);
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			atomic_inc(&phba->fc4ScsiInputRequests);
		}
	} else {
		/* No data phase: plain FCP command IOCB. */
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		atomic_inc(&phba->fc4ScsiControlRequests);
	}
	/* SLI-3 without BlockGuard carries the FCP command inline in the
	 * IOCB extension, byte-swapped to big-endian wire order.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	/* SLI4 addresses the RPI through the driver's rpi_ids mapping. */
	if (sli4)
		piocbq->iocb.ulpContext =
		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
4325
4326 /**
4327  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4328  * @vport: The virtual port for which this call is being executed.
4329  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4330  * @lun: Logical unit number.
4331  * @task_mgmt_cmd: SCSI task management command.
4332  *
4333  * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4334  * for device with SLI-3 interface spec.
4335  *
4336  * Return codes:
4337  *   0 - Error
4338  *   1 - Success
4339  **/
4340 static int
4341 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4342                              struct lpfc_scsi_buf *lpfc_cmd,
4343                              uint64_t lun,
4344                              uint8_t task_mgmt_cmd)
4345 {
4346         struct lpfc_iocbq *piocbq;
4347         IOCB_t *piocb;
4348         struct fcp_cmnd *fcp_cmnd;
4349         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4350         struct lpfc_nodelist *ndlp = rdata->pnode;
4351
4352         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4353             ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4354                 return 0;
4355
4356         piocbq = &(lpfc_cmd->cur_iocbq);
4357         piocbq->vport = vport;
4358
4359         piocb = &piocbq->iocb;
4360
4361         fcp_cmnd = lpfc_cmd->fcp_cmnd;
4362         /* Clear out any old data in the FCP command area */
4363         memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4364         int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4365         fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4366         if (vport->phba->sli_rev == 3 &&
4367             !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4368                 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4369         piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4370         piocb->ulpContext = ndlp->nlp_rpi;
4371         if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4372                 piocb->ulpContext =
4373                   vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4374         }
4375         piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4376         piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4377         piocb->ulpPU = 0;
4378         piocb->un.fcpi.fcpi_parm = 0;
4379
4380         /* ulpTimeout is only one byte */
4381         if (lpfc_cmd->timeout > 0xff) {
4382                 /*
4383                  * Do not timeout the command at the firmware level.
4384                  * The driver will provide the timeout mechanism.
4385                  */
4386                 piocb->ulpTimeout = 0;
4387         } else
4388                 piocb->ulpTimeout = lpfc_cmd->timeout;
4389
4390         if (vport->phba->sli_rev == LPFC_SLI_REV4)
4391                 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4392
4393         return 1;
4394 }
4395
4396 /**
4397  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4398  * @phba: The hba struct for which this call is being executed.
4399  * @dev_grp: The HBA PCI-Device group number.
4400  *
4401  * This routine sets up the SCSI interface API function jump table in @phba
4402  * struct.
4403  * Returns: 0 - success, -ENODEV - failure.
4404  **/
4405 int
4406 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4407 {
4408
4409         phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4410         phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4411
4412         switch (dev_grp) {
4413         case LPFC_PCI_DEV_LP:
4414                 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4415                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4416                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4417                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4418                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4419                 break;
4420         case LPFC_PCI_DEV_OC:
4421                 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4422                 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4423                 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4424                 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4425                 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4426                 break;
4427         default:
4428                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4429                                 "1418 Invalid HBA PCI-device group: 0x%x\n",
4430                                 dev_grp);
4431                 return -ENODEV;
4432                 break;
4433         }
4434         phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4435         phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4436         return 0;
4437 }
4438
4439 /**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
4441  * @phba: The Hba for which this call is being executed.
4442  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4443  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4444  *
4445  * This routine is IOCB completion routine for device reset and target reset
4446  * routine. This routine release scsi buffer associated with lpfc_cmd.
4447  **/
4448 static void
4449 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4450                         struct lpfc_iocbq *cmdiocbq,
4451                         struct lpfc_iocbq *rspiocbq)
4452 {
4453         struct lpfc_scsi_buf *lpfc_cmd =
4454                 (struct lpfc_scsi_buf *) cmdiocbq->context1;
4455         if (lpfc_cmd)
4456                 lpfc_release_scsi_buf(phba, lpfc_cmd);
4457         return;
4458 }
4459
4460 /**
4461  * lpfc_info - Info entry point of scsi_host_template data structure
4462  * @host: The scsi host for which this call is being executed.
4463  *
4464  * This routine provides module information about hba.
4465  *
 * Return code:
4467  *   Pointer to char - Success.
4468  **/
4469 const char *
4470 lpfc_info(struct Scsi_Host *host)
4471 {
4472         struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4473         struct lpfc_hba   *phba = vport->phba;
4474         int len, link_speed = 0;
4475         static char  lpfcinfobuf[384];
4476
4477         memset(lpfcinfobuf,0,384);
4478         if (phba && phba->pcidev){
4479                 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4480                 len = strlen(lpfcinfobuf);
4481                 snprintf(lpfcinfobuf + len,
4482                         384-len,
4483                         " on PCI bus %02x device %02x irq %d",
4484                         phba->pcidev->bus->number,
4485                         phba->pcidev->devfn,
4486                         phba->pcidev->irq);
4487                 len = strlen(lpfcinfobuf);
4488                 if (phba->Port[0]) {
4489                         snprintf(lpfcinfobuf + len,
4490                                  384-len,
4491                                  " port %s",
4492                                  phba->Port);
4493                 }
4494                 len = strlen(lpfcinfobuf);
4495                 link_speed = lpfc_sli_port_speed_get(phba);
4496                 if (link_speed != 0)
4497                         snprintf(lpfcinfobuf + len, 384-len,
4498                                  " Logical Link Speed: %d Mbps", link_speed);
4499         }
4500         return lpfcinfobuf;
4501 }
4502
4503 /**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
4505  * @phba: The Hba for which this call is being executed.
4506  *
4507  * This routine modifies fcp_poll_timer  field of @phba by cfg_poll_tmo.
4508  * The default value of cfg_poll_tmo is 10 milliseconds.
4509  **/
4510 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4511 {
4512         unsigned long  poll_tmo_expires =
4513                 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4514
4515         if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4516                 mod_timer(&phba->fcp_poll_timer,
4517                           poll_tmo_expires);
4518 }
4519
4520 /**
4521  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4522  * @phba: The Hba for which this call is being executed.
4523  *
4524  * This routine starts the fcp_poll_timer of @phba.
4525  **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	/* Delegates to the rearm helper, which only arms the timer when
	 * the FCP ring's txcmplq has outstanding commands.
	 */
	lpfc_poll_rearm_timer(phba);
}
4530
4531 /**
4532  * lpfc_poll_timeout - Restart polling timer
 * @t: Pointer to the timer_list embedded in the lpfc_hba structure.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is
 * enabled and the FCP ring interrupt is disabled.
4537  **/
4538
4539 void lpfc_poll_timeout(struct timer_list *t)
4540 {
4541         struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
4542
4543         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4544                 lpfc_sli_handle_fast_ring_event(phba,
4545                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4546
4547                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4548                         lpfc_poll_rearm_timer(phba);
4549         }
4550 }
4551
4552 /**
4553  * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to Scsi_Host data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from scsi command and provides to firmware.
 * cmnd->scsi_done() is invoked once the driver has finished processing the
 * command.
4560  *
4561  * Return value :
4562  *   0 - Success
4563  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4564  **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* sanity check on references */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	/* Reject protected (DIF) commands when BlockGuard was never
	 * registered for this adapter.
	 */
	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	/* Honor the per-target queue depth: bounce the command back as
	 * target-busy when the node already has cmd_qdepth in flight.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
					 "3377 Target Queue Full, scsi Id:%d "
					 "Qdepth:%d Pending command:%d"
					 " WWNN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x, "
					 " WWPN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x",
					 ndlp->nlp_sid, ndlp->cmd_qdepth,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->nlp_nodename.u.wwn[0],
					 ndlp->nlp_nodename.u.wwn[1],
					 ndlp->nlp_nodename.u.wwn[2],
					 ndlp->nlp_nodename.u.wwn[3],
					 ndlp->nlp_nodename.u.wwn[4],
					 ndlp->nlp_nodename.u.wwn[5],
					 ndlp->nlp_nodename.u.wwn[6],
					 ndlp->nlp_nodename.u.wwn[7],
					 ndlp->nlp_portname.u.wwn[0],
					 ndlp->nlp_portname.u.wwn[1],
					 ndlp->nlp_portname.u.wwn[2],
					 ndlp->nlp_portname.u.wwn[3],
					 ndlp->nlp_portname.u.wwn[4],
					 ndlp->nlp_portname.u.wwn[5],
					 ndlp->nlp_portname.u.wwn[6],
					 ndlp->nlp_portname.u.wwn[7]);
			goto out_tgt_busy;
		}
	}

	/* Out of driver buffers: ramp the queue depth down and ask the
	 * midlayer to retry later (host busy).
	 */
	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->ndlp = ndlp;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	/* Map the data buffers: BlockGuard-aware path for protected I/O,
	 * the plain DMA-prep path otherwise.
	 */
	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		/* Undo the per-direction request counter that was bumped
		 * by lpfc_scsi_prep_cmnd, since the IOCB was never issued.
		 */
		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
		case WRITE_DATA:
			atomic_dec(&phba->fc4ScsiOutputRequests);
			break;
		case READ_DATA:
			atomic_dec(&phba->fc4ScsiInputRequests);
			break;
		default:
			atomic_dec(&phba->fc4ScsiControlRequests);
		}
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
4746
4747
4748 /**
4749  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4750  * @cmnd: Pointer to scsi_cmnd data structure.
4751  *
4752  * This routine aborts @cmnd pending in base driver.
4753  *
4754  * Return code :
4755  *   0x2003 - Error
4756  *   0x2002 - Success
4757  **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4 = NULL;
	int ret_val;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	/* If the rport is blocked, wait for it to come back or be gone */
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	/* host_scribble carries the driver's lpfc_scsi_buf for this command;
	 * NULL (or a cleared pCmd) means the I/O has already completed.
	 */
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI4: locate the WQ ring this I/O was issued on (FOF I/O
		 * rides the OAS WQ); its ring_lock must be taken nested
		 * inside hbalock for the rest of this routine.
		 */
		if (!(phba->cfg_fof) ||
		    (!(iocb->iocb_flag & LPFC_IO_FOF))) {
			pring_s4 =
				phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
		} else {
			iocb->hba_wqidx = 0;
			pring_s4 = phba->sli4_hba.oas_wq->pring;
		}
		if (!pring_s4) {
			ret = FAILED;
			goto out_unlock;
		}
		spin_lock(&pring_s4->ring_lock);
	}
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	/* Allocate the ABTS iocb while both locks are still held */
	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		goto out_unlock;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	/* SLI4 aborts are addressed by XRI; SLI3 by iotag */
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->hba_wqidx = iocb->hba_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	/* With no link there is nothing to abort on the wire; just close
	 * the exchange locally.
	 */
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	/* The FCP completion handler will wake us via this waitq */
	lpfc_cmd->waitq = &waitq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Note: both hbalock and ring_lock must be set here */
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock(&pring_s4->ring_lock);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);


	if (ret_val == IOCB_ERROR) {
		/* Issue failed: undo the abort marking under the proper lock
		 * and release the unused ABTS iocb.
		 */
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_lock_irqsave(&pring_s4->ring_lock, flags);
		else
			spin_lock_irqsave(&phba->hbalock, flags);
		/* Indicate the IO is not being aborted by the driver. */
		iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		lpfc_cmd->waitq = NULL;
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock_irqrestore(&pring_s4->ring_lock, flags);
		else
			spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	/* In polling mode, kick the ring so the abort completes promptly */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* pCmd still pointing at us means the abort never completed */
	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}
4958
4959 static char *
4960 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4961 {
4962         switch (task_mgmt_cmd) {
4963         case FCP_ABORT_TASK_SET:
4964                 return "ABORT_TASK_SET";
4965         case FCP_CLEAR_TASK_SET:
4966                 return "FCP_CLEAR_TASK_SET";
4967         case FCP_BUS_RESET:
4968                 return "FCP_BUS_RESET";
4969         case FCP_LUN_RESET:
4970                 return "FCP_LUN_RESET";
4971         case FCP_TARGET_RESET:
4972                 return "FCP_TARGET_RESET";
4973         case FCP_CLEAR_ACA:
4974                 return "FCP_CLEAR_ACA";
4975         case FCP_TERMINATE_TASK:
4976                 return "FCP_TERMINATE_TASK";
4977         default:
4978                 return "unknown";
4979         }
4980 }
4981
4982
4983 /**
4984  * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4985  * @vport: The virtual port for which this call is being executed.
4986  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4987  *
4988  * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded
4989  *
4990  * Return code :
4991  *   0x2003 - Error
4992  *   0x2002 - Success
4993  **/
4994 static int
4995 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
4996 {
4997         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4998         uint32_t rsp_info;
4999         uint32_t rsp_len;
5000         uint8_t  rsp_info_code;
5001         int ret = FAILED;
5002
5003
5004         if (fcprsp == NULL)
5005                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5006                                  "0703 fcp_rsp is missing\n");
5007         else {
5008                 rsp_info = fcprsp->rspStatus2;
5009                 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5010                 rsp_info_code = fcprsp->rspInfo3;
5011
5012
5013                 lpfc_printf_vlog(vport, KERN_INFO,
5014                                  LOG_FCP,
5015                                  "0706 fcp_rsp valid 0x%x,"
5016                                  " rsp len=%d code 0x%x\n",
5017                                  rsp_info,
5018                                  rsp_len, rsp_info_code);
5019
5020                 if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
5021                         switch (rsp_info_code) {
5022                         case RSP_NO_FAILURE:
5023                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5024                                                  "0715 Task Mgmt No Failure\n");
5025                                 ret = SUCCESS;
5026                                 break;
5027                         case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5028                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5029                                                  "0716 Task Mgmt Target "
5030                                                 "reject\n");
5031                                 break;
5032                         case RSP_TM_NOT_COMPLETED: /* TM failed */
5033                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5034                                                  "0717 Task Mgmt Target "
5035                                                 "failed TM\n");
5036                                 break;
5037                         case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5038                                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5039                                                  "0718 Task Mgmt to invalid "
5040                                                 "LUN\n");
5041                                 break;
5042                         }
5043                 }
5044         }
5045         return ret;
5046 }
5047
5048
5049 /**
5050  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5051  * @vport: The virtual port for which this call is being executed.
5052  * @rdata: Pointer to remote port local data
5053  * @tgt_id: Target ID of remote device.
5054  * @lun_id: Lun number for the TMF
5055  * @task_mgmt_cmd: type of TMF to send
5056  *
5057  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5058  * a remote port.
5059  *
5060  * Return Code:
5061  *   0x2003 - Error
5062  *   0x2002 - Success.
5063  **/
5064 static int
5065 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
5066                    unsigned int tgt_id, uint64_t lun_id,
5067                    uint8_t task_mgmt_cmd)
5068 {
5069         struct lpfc_hba   *phba = vport->phba;
5070         struct lpfc_scsi_buf *lpfc_cmd;
5071         struct lpfc_iocbq *iocbq;
5072         struct lpfc_iocbq *iocbqrsp;
5073         struct lpfc_rport_data *rdata;
5074         struct lpfc_nodelist *pnode;
5075         int ret;
5076         int status;
5077
5078         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5079         if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
5080                 return FAILED;
5081         pnode = rdata->pnode;
5082
5083         lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
5084         if (lpfc_cmd == NULL)
5085                 return FAILED;
5086         lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5087         lpfc_cmd->rdata = rdata;
5088         lpfc_cmd->pCmd = cmnd;
5089         lpfc_cmd->ndlp = pnode;
5090
5091         status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5092                                            task_mgmt_cmd);
5093         if (!status) {
5094                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5095                 return FAILED;
5096         }
5097
5098         iocbq = &lpfc_cmd->cur_iocbq;
5099         iocbqrsp = lpfc_sli_get_iocbq(phba);
5100         if (iocbqrsp == NULL) {
5101                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5102                 return FAILED;
5103         }
5104         iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5105
5106         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5107                          "0702 Issue %s to TGT %d LUN %llu "
5108                          "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5109                          lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5110                          pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5111                          iocbq->iocb_flag);
5112
5113         status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5114                                           iocbq, iocbqrsp, lpfc_cmd->timeout);
5115         if ((status != IOCB_SUCCESS) ||
5116             (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5117                 if (status != IOCB_SUCCESS ||
5118                     iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
5119                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5120                                          "0727 TMF %s to TGT %d LUN %llu "
5121                                          "failed (%d, %d) iocb_flag x%x\n",
5122                                          lpfc_taskmgmt_name(task_mgmt_cmd),
5123                                          tgt_id, lun_id,
5124                                          iocbqrsp->iocb.ulpStatus,
5125                                          iocbqrsp->iocb.un.ulpWord[4],
5126                                          iocbq->iocb_flag);
5127                 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
5128                 if (status == IOCB_SUCCESS) {
5129                         if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
5130                                 /* Something in the FCP_RSP was invalid.
5131                                  * Check conditions */
5132                                 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5133                         else
5134                                 ret = FAILED;
5135                 } else if (status == IOCB_TIMEDOUT) {
5136                         ret = TIMEOUT_ERROR;
5137                 } else {
5138                         ret = FAILED;
5139                 }
5140         } else
5141                 ret = SUCCESS;
5142
5143         lpfc_sli_release_iocbq(phba, iocbqrsp);
5144
5145         if (ret != TIMEOUT_ERROR)
5146                 lpfc_release_scsi_buf(phba, lpfc_cmd);
5147
5148         return ret;
5149 }
5150
5151 /**
5152  * lpfc_chk_tgt_mapped -
5153  * @vport: The virtual port to check on
5154  * @cmnd: Pointer to scsi_cmnd data structure.
5155  *
5156  * This routine delays until the scsi target (aka rport) for the
5157  * command exists (is present and logged in) or we declare it non-existent.
5158  *
5159  * Return code :
5160  *  0x2003 - Error
5161  *  0x2002 - Success
5162  **/
5163 static int
5164 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5165 {
5166         struct lpfc_rport_data *rdata;
5167         struct lpfc_nodelist *pnode;
5168         unsigned long later;
5169
5170         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5171         if (!rdata) {
5172                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5173                         "0797 Tgt Map rport failure: rdata x%p\n", rdata);
5174                 return FAILED;
5175         }
5176         pnode = rdata->pnode;
5177         /*
5178          * If target is not in a MAPPED state, delay until
5179          * target is rediscovered or devloss timeout expires.
5180          */
5181         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5182         while (time_after(later, jiffies)) {
5183                 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5184                         return FAILED;
5185                 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5186                         return SUCCESS;
5187                 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5188                 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5189                 if (!rdata)
5190                         return FAILED;
5191                 pnode = rdata->pnode;
5192         }
5193         if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5194             (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5195                 return FAILED;
5196         return SUCCESS;
5197 }
5198
5199 /**
5200  * lpfc_reset_flush_io_context -
5201  * @vport: The virtual port (scsi_host) for the flush context
5202  * @tgt_id: If aborting by Target contect - specifies the target id
5203  * @lun_id: If aborting by Lun context - specifies the lun id
5204  * @context: specifies the context level to flush at.
5205  *
5206  * After a reset condition via TMF, we need to flush orphaned i/o
5207  * contexts from the adapter. This routine aborts any contexts
5208  * outstanding, then waits for their completions. The wait is
5209  * bounded by devloss_tmo though.
5210  *
5211  * Return code :
5212  *  0x2003 - Error
5213  *  0x2002 - Success
5214  **/
5215 static int
5216 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5217                         uint64_t lun_id, lpfc_ctx_cmd context)
5218 {
5219         struct lpfc_hba   *phba = vport->phba;
5220         unsigned long later;
5221         int cnt;
5222
5223         cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5224         if (cnt)
5225                 lpfc_sli_abort_taskmgmt(vport,
5226                                         &phba->sli.sli3_ring[LPFC_FCP_RING],
5227                                         tgt_id, lun_id, context);
5228         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5229         while (time_after(later, jiffies) && cnt) {
5230                 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5231                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5232         }
5233         if (cnt) {
5234                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5235                         "0724 I/O flush failure for context %s : cnt x%x\n",
5236                         ((context == LPFC_CTX_LUN) ? "LUN" :
5237                          ((context == LPFC_CTX_TGT) ? "TGT" :
5238                           ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5239                         cnt);
5240                 return FAILED;
5241         }
5242         return SUCCESS;
5243 }
5244
5245 /**
5246  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5247  * @cmnd: Pointer to scsi_cmnd data structure.
5248  *
5249  * This routine does a device reset by sending a LUN_RESET task management
5250  * command.
5251  *
5252  * Return code :
5253  *  0x2003 - Error
5254  *  0x2002 - Success
5255  **/
5256 static int
5257 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5258 {
5259         struct Scsi_Host  *shost = cmnd->device->host;
5260         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5261         struct lpfc_rport_data *rdata;
5262         struct lpfc_nodelist *pnode;
5263         unsigned tgt_id = cmnd->device->id;
5264         uint64_t lun_id = cmnd->device->lun;
5265         struct lpfc_scsi_event_header scsi_event;
5266         int status;
5267
5268         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5269         if (!rdata || !rdata->pnode) {
5270                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5271                                  "0798 Device Reset rport failure: rdata x%p\n",
5272                                  rdata);
5273                 return FAILED;
5274         }
5275         pnode = rdata->pnode;
5276         status = fc_block_scsi_eh(cmnd);
5277         if (status != 0 && status != SUCCESS)
5278                 return status;
5279
5280         status = lpfc_chk_tgt_mapped(vport, cmnd);
5281         if (status == FAILED) {
5282                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5283                         "0721 Device Reset rport failure: rdata x%p\n", rdata);
5284                 return FAILED;
5285         }
5286
5287         scsi_event.event_type = FC_REG_SCSI_EVENT;
5288         scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5289         scsi_event.lun = lun_id;
5290         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5291         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5292
5293         fc_host_post_vendor_event(shost, fc_get_event_number(),
5294                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5295
5296         status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5297                                                 FCP_LUN_RESET);
5298
5299         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5300                          "0713 SCSI layer issued Device Reset (%d, %llu) "
5301                          "return x%x\n", tgt_id, lun_id, status);
5302
5303         /*
5304          * We have to clean up i/o as : they may be orphaned by the TMF;
5305          * or if the TMF failed, they may be in an indeterminate state.
5306          * So, continue on.
5307          * We will report success if all the i/o aborts successfully.
5308          */
5309         if (status == SUCCESS)
5310                 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5311                                                 LPFC_CTX_LUN);
5312
5313         return status;
5314 }
5315
5316 /**
5317  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5318  * @cmnd: Pointer to scsi_cmnd data structure.
5319  *
5320  * This routine does a target reset by sending a TARGET_RESET task management
5321  * command.
5322  *
5323  * Return code :
5324  *  0x2003 - Error
5325  *  0x2002 - Success
5326  **/
5327 static int
5328 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5329 {
5330         struct Scsi_Host  *shost = cmnd->device->host;
5331         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5332         struct lpfc_rport_data *rdata;
5333         struct lpfc_nodelist *pnode;
5334         unsigned tgt_id = cmnd->device->id;
5335         uint64_t lun_id = cmnd->device->lun;
5336         struct lpfc_scsi_event_header scsi_event;
5337         int status;
5338
5339         rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5340         if (!rdata) {
5341                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5342                         "0799 Target Reset rport failure: rdata x%p\n", rdata);
5343                 return FAILED;
5344         }
5345         pnode = rdata->pnode;
5346         status = fc_block_scsi_eh(cmnd);
5347         if (status != 0 && status != SUCCESS)
5348                 return status;
5349
5350         status = lpfc_chk_tgt_mapped(vport, cmnd);
5351         if (status == FAILED) {
5352                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5353                         "0722 Target Reset rport failure: rdata x%p\n", rdata);
5354                 if (pnode) {
5355                         spin_lock_irq(shost->host_lock);
5356                         pnode->nlp_flag &= ~NLP_NPR_ADISC;
5357                         pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5358                         spin_unlock_irq(shost->host_lock);
5359                 }
5360                 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5361                                           LPFC_CTX_TGT);
5362                 return FAST_IO_FAIL;
5363         }
5364
5365         scsi_event.event_type = FC_REG_SCSI_EVENT;
5366         scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5367         scsi_event.lun = 0;
5368         memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5369         memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5370
5371         fc_host_post_vendor_event(shost, fc_get_event_number(),
5372                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5373
5374         status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5375                                         FCP_TARGET_RESET);
5376
5377         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5378                          "0723 SCSI layer issued Target Reset (%d, %llu) "
5379                          "return x%x\n", tgt_id, lun_id, status);
5380
5381         /*
5382          * We have to clean up i/o as : they may be orphaned by the TMF;
5383          * or if the TMF failed, they may be in an indeterminate state.
5384          * So, continue on.
5385          * We will report success if all the i/o aborts successfully.
5386          */
5387         if (status == SUCCESS)
5388                 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5389                                           LPFC_CTX_TGT);
5390         return status;
5391 }
5392
5393 /**
5394  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5395  * @cmnd: Pointer to scsi_cmnd data structure.
5396  *
5397  * This routine does target reset to all targets on @cmnd->device->host.
5398  * This emulates Parallel SCSI Bus Reset Semantics.
5399  *
5400  * Return code :
5401  *  0x2003 - Error
5402  *  0x2002 - Success
5403  **/
5404 static int
5405 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5406 {
5407         struct Scsi_Host  *shost = cmnd->device->host;
5408         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5409         struct lpfc_nodelist *ndlp = NULL;
5410         struct lpfc_scsi_event_header scsi_event;
5411         int match;
5412         int ret = SUCCESS, status, i;
5413
5414         scsi_event.event_type = FC_REG_SCSI_EVENT;
5415         scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5416         scsi_event.lun = 0;
5417         memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5418         memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5419
5420         fc_host_post_vendor_event(shost, fc_get_event_number(),
5421                 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5422
5423         status = fc_block_scsi_eh(cmnd);
5424         if (status != 0 && status != SUCCESS)
5425                 return status;
5426
5427         /*
5428          * Since the driver manages a single bus device, reset all
5429          * targets known to the driver.  Should any target reset
5430          * fail, this routine returns failure to the midlayer.
5431          */
5432         for (i = 0; i < LPFC_MAX_TARGET; i++) {
5433                 /* Search for mapped node by target ID */
5434                 match = 0;
5435                 spin_lock_irq(shost->host_lock);
5436                 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5437                         if (!NLP_CHK_NODE_ACT(ndlp))
5438                                 continue;
5439                         if (vport->phba->cfg_fcp2_no_tgt_reset &&
5440                             (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5441                                 continue;
5442                         if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5443                             ndlp->nlp_sid == i &&
5444                             ndlp->rport &&
5445                             ndlp->nlp_type & NLP_FCP_TARGET) {
5446                                 match = 1;
5447                                 break;
5448                         }
5449                 }
5450                 spin_unlock_irq(shost->host_lock);
5451                 if (!match)
5452                         continue;
5453
5454                 status = lpfc_send_taskmgmt(vport, cmnd,
5455                                         i, 0, FCP_TARGET_RESET);
5456
5457                 if (status != SUCCESS) {
5458                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5459                                          "0700 Bus Reset on target %d failed\n",
5460                                          i);
5461                         ret = FAILED;
5462                 }
5463         }
5464         /*
5465          * We have to clean up i/o as : they may be orphaned by the TMFs
5466          * above; or if any of the TMFs failed, they may be in an
5467          * indeterminate state.
5468          * We will report success if all the i/o aborts successfully.
5469          */
5470
5471         status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5472         if (status != SUCCESS)
5473                 ret = FAILED;
5474
5475         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5476                          "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5477         return ret;
5478 }
5479
5480 /**
5481  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5482  * @cmnd: Pointer to scsi_cmnd data structure.
5483  *
5484  * This routine does host reset to the adaptor port. It brings the HBA
5485  * offline, performs a board restart, and then brings the board back online.
5486  * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
5487  * reject all outstanding SCSI commands to the host and error returned
5488  * back to SCSI mid-level. As this will be SCSI mid-level's last resort
5489  * of error handling, it will only return error if resetting of the adapter
5490  * is not successful; in all other cases, will return success.
5491  *
5492  * Return code :
5493  *  0x2003 - Error
5494  *  0x2002 - Success
5495  **/
5496 static int
5497 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5498 {
5499         struct Scsi_Host *shost = cmnd->device->host;
5500         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5501         struct lpfc_hba *phba = vport->phba;
5502         int rc, ret = SUCCESS;
5503
5504         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5505                          "3172 SCSI layer issued Host Reset Data:\n");
5506
5507         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5508         lpfc_offline(phba);
5509         rc = lpfc_sli_brdrestart(phba);
5510         if (rc)
5511                 ret = FAILED;
5512         rc = lpfc_online(phba);
5513         if (rc)
5514                 ret = FAILED;
5515         lpfc_unblock_mgmt_io(phba);
5516
5517         if (ret == FAILED) {
5518                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5519                                  "3323 Failed host reset, bring it offline\n");
5520                 lpfc_sli4_offline_eratt(phba);
5521         }
5522         return ret;
5523 }
5524
5525 /**
5526  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5527  * @sdev: Pointer to scsi_device.
5528  *
5529  * This routine populates the cmds_per_lun count + 2 scsi_bufs into  this host's
5530  * globally available list of scsi buffers. This routine also makes sure scsi
5531  * buffer is not allocated more than HBA limit conveyed to midlayer. This list
5532  * of scsi buffer exists for the lifetime of the driver.
5533  *
5534  * Return codes:
5535  *   non-0 - Error
5536  *   0 - Success
5537  **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	/* The remote port must exist and be ready before this lun is
	 * accepted.
	 */
	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists.  If not, create one.
		 */

		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			/* Drop the lock around the allocation, then
			 * retake it to link the new entry onto the list.
			 * NOTE(review): another thread could insert the
			 * same lun while the lock is dropped - confirm a
			 * duplicate entry is acceptable here.
			 */
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun,
							phba->cfg_XLanePriority,
							true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	/* Partial allocation is tolerated; just log it and account for
	 * whatever was actually obtained.
	 */
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0708 Allocation request of %d "
					 "command buffers did not succeed.  "
					 "Allocated %d buffers.\n",
					 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}
5634
5635 /**
5636  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5637  * @sdev: Pointer to scsi_device.
5638  *
5639  * This routine configures following items
5640  *   - Tag command queuing support for @sdev if supported.
5641  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5642  *
5643  * Return codes:
5644  *   0 - Success
5645  **/
5646 static int
5647 lpfc_slave_configure(struct scsi_device *sdev)
5648 {
5649         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5650         struct lpfc_hba   *phba = vport->phba;
5651
5652         scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5653
5654         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5655                 lpfc_sli_handle_fast_ring_event(phba,
5656                         &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5657                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5658                         lpfc_poll_rearm_timer(phba);
5659         }
5660
5661         return 0;
5662 }
5663
5664 /**
5665  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5666  * @sdev: Pointer to scsi_device.
5667  *
 * This routine sets the @sdev hostdata field to null.
5669  **/
5670 static void
5671 lpfc_slave_destroy(struct scsi_device *sdev)
5672 {
5673         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5674         struct lpfc_hba   *phba = vport->phba;
5675         unsigned long flags;
5676         struct lpfc_device_data *device_data = sdev->hostdata;
5677
5678         atomic_dec(&phba->sdev_cnt);
5679         if ((phba->cfg_fof) && (device_data)) {
5680                 spin_lock_irqsave(&phba->devicelock, flags);
5681                 device_data->available = false;
5682                 if (!device_data->oas_enabled)
5683                         lpfc_delete_device_data(phba, device_data);
5684                 spin_unlock_irqrestore(&phba->devicelock, flags);
5685         }
5686         sdev->hostdata = NULL;
5687         return;
5688 }
5689
5690 /**
5691  * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
5693  * @vport_wwpn: Pointer to vport's wwpn information
5694  * @target_wwpn: Pointer to target's wwpn information
5695  * @lun: Lun on target
5696  * @atomic_create: Flag to indicate if memory should be allocated using the
5697  *                GFP_ATOMIC flag or not.
5698  *
5699  * This routine creates a device data structure which will contain identifying
5700  * information for the device (host wwpn, target wwpn, lun), state of OAS,
5701  * whether or not the corresponding lun is available by the system,
5702  * and pointer to the rport data.
5703  *
5704  * Return codes:
5705  *   NULL - Error
5706  *   Pointer to lpfc_device_data - Success
5707  **/
5708 struct lpfc_device_data*
5709 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5710                         struct lpfc_name *target_wwpn, uint64_t lun,
5711                         uint32_t pri, bool atomic_create)
5712 {
5713
5714         struct lpfc_device_data *lun_info;
5715         int memory_flags;
5716
5717         if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
5718             !(phba->cfg_fof))
5719                 return NULL;
5720
5721         /* Attempt to create the device data to contain lun info */
5722
5723         if (atomic_create)
5724                 memory_flags = GFP_ATOMIC;
5725         else
5726                 memory_flags = GFP_KERNEL;
5727         lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5728         if (!lun_info)
5729                 return NULL;
5730         INIT_LIST_HEAD(&lun_info->listentry);
5731         lun_info->rport_data  = NULL;
5732         memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5733                sizeof(struct lpfc_name));
5734         memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5735                sizeof(struct lpfc_name));
5736         lun_info->device_id.lun = lun;
5737         lun_info->oas_enabled = false;
5738         lun_info->priority = pri;
5739         lun_info->available = false;
5740         return lun_info;
5741 }
5742
5743 /**
5744  * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
5746  * @lun_info: Pointer to device data structure to free.
5747  *
5748  * This routine frees the previously allocated device data structure passed.
5749  *
5750  **/
5751 void
5752 lpfc_delete_device_data(struct lpfc_hba *phba,
5753                         struct lpfc_device_data *lun_info)
5754 {
5755
5756         if (unlikely(!phba) || !lun_info  ||
5757             !(phba->cfg_fof))
5758                 return;
5759
5760         if (!list_empty(&lun_info->listentry))
5761                 list_del(&lun_info->listentry);
5762         mempool_free(lun_info, phba->device_data_mem_pool);
5763         return;
5764 }
5765
5766 /**
5767  * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
5769  * @list: Point to list to search.
5770  * @vport_wwpn: Pointer to vport's wwpn information
5771  * @target_wwpn: Pointer to target's wwpn information
5772  * @lun: Lun on target
5773  *
5774  * This routine searches the list passed for the specified lun's device data.
5775  * This function does not hold locks, it is the responsibility of the caller
5776  * to ensure the proper lock is held before calling the function.
5777  *
5778  * Return codes:
5779  *   NULL - Error
5780  *   Pointer to lpfc_device_data - Success
5781  **/
5782 struct lpfc_device_data*
5783 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5784                        struct lpfc_name *vport_wwpn,
5785                        struct lpfc_name *target_wwpn, uint64_t lun)
5786 {
5787
5788         struct lpfc_device_data *lun_info;
5789
5790         if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5791             !phba->cfg_fof)
5792                 return NULL;
5793
5794         /* Check to see if the lun is already enabled for OAS. */
5795
5796         list_for_each_entry(lun_info, list, listentry) {
5797                 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5798                             sizeof(struct lpfc_name)) == 0) &&
5799                     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5800                             sizeof(struct lpfc_name)) == 0) &&
5801                     (lun_info->device_id.lun == lun))
5802                         return lun_info;
5803         }
5804
5805         return NULL;
5806 }
5807
5808 /**
5809  * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
5811  * @vport_wwpn: Pointer to vport's wwpn information
5812  * @target_wwpn: Pointer to target's wwpn information
5813  * @starting_lun: Pointer to the lun to start searching for
5814  * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5815  * @found_target_wwpn: Pointer to the found lun's target wwpn information
5816  * @found_lun: Pointer to the found lun.
5817  * @found_lun_status: Pointer to status of the found lun.
5818  *
5819  * This routine searches the luns list for the specified lun
5820  * or the first lun for the vport/target.  If the vport wwpn contains
5821  * a zero value then a specific vport is not specified. In this case
5822  * any vport which contains the lun will be considered a match.  If the
5823  * target wwpn contains a zero value then a specific target is not specified.
5824  * In this case any target which contains the lun will be considered a
5825  * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
5826  * are returned.  The function will also return the next lun if available.
5827  * If the next lun is not found, starting_lun parameter will be set to
5828  * NO_MORE_OAS_LUN.
5829  *
 * Return codes:
 *   true - lun found
 *   false - lun not found or invalid input
5833  **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status,
		       uint32_t *found_lun_pri)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	/* Reject bad pointers, an already-exhausted iteration
	 * (NO_MORE_OAS_LUN), or an adapter without OAS configured.
	 * NOTE(review): found_lun_pri is dereferenced below but is not
	 * NULL-checked here - confirm all callers pass it.
	 */
	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for lun or the lun closest in value */

	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		/* A zero wwpn acts as a wildcard for the vport/target
		 * comparison; only OAS-enabled entries are considered.
		 */
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				/* First match: report this lun's identity,
				 * availability status and priority.
				 */
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				*found_lun_pri = lun_info->priority;
				/* Widen the search keys back to wildcards
				 * when the caller asked for "any".
				 */
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				/* Next matching lun: hand its identity back
				 * in starting_lun/wwpns so the caller can
				 * resume iteration from here.
				 */
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}
5909
5910 /**
5911  * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority value stored for the lun
5916  *
5917  * This routine enables a lun for oas operations.  The routines does so by
5918  * doing the following :
5919  *
5920  *   1) Checks to see if the device data for the lun has been created.
5921  *   2) If found, sets the OAS enabled flag if not set and returns.
5922  *   3) Otherwise, creates a device data structure.
5923  *   4) If successfully created, indicates the device data is for an OAS lun,
5924  *   indicates the lun is not available and add to the list of luns.
5925  *
5926  * Return codes:
5927  *   false - Error
5928  *   true - Success
5929  **/
5930 bool
5931 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5932                     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5933 {
5934
5935         struct lpfc_device_data *lun_info;
5936         unsigned long flags;
5937
5938         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5939             !phba->cfg_fof)
5940                 return false;
5941
5942         spin_lock_irqsave(&phba->devicelock, flags);
5943
5944         /* Check to see if the device data for the lun has been created */
5945         lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5946                                           target_wwpn, lun);
5947         if (lun_info) {
5948                 if (!lun_info->oas_enabled)
5949                         lun_info->oas_enabled = true;
5950                 lun_info->priority = pri;
5951                 spin_unlock_irqrestore(&phba->devicelock, flags);
5952                 return true;
5953         }
5954
5955         /* Create an lun info structure and add to list of luns */
5956         lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5957                                            pri, false);
5958         if (lun_info) {
5959                 lun_info->oas_enabled = true;
5960                 lun_info->priority = pri;
5961                 lun_info->available = false;
5962                 list_add_tail(&lun_info->listentry, &phba->luns);
5963                 spin_unlock_irqrestore(&phba->devicelock, flags);
5964                 return true;
5965         }
5966         spin_unlock_irqrestore(&phba->devicelock, flags);
5967         return false;
5968 }
5969
5970 /**
5971  * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority value stored for the lun
5976  *
5977  * This routine disables a lun for oas operations.  The routines does so by
5978  * doing the following :
5979  *
5980  *   1) Checks to see if the device data for the lun is created.
5981  *   2) If present, clears the flag indicating this lun is for OAS.
5982  *   3) If the lun is not available by the system, the device data is
5983  *   freed.
5984  *
5985  * Return codes:
5986  *   false - Error
5987  *   true - Success
5988  **/
5989 bool
5990 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5991                      struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5992 {
5993
5994         struct lpfc_device_data *lun_info;
5995         unsigned long flags;
5996
5997         if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5998             !phba->cfg_fof)
5999                 return false;
6000
6001         spin_lock_irqsave(&phba->devicelock, flags);
6002
6003         /* Check to see if the lun is available. */
6004         lun_info = __lpfc_get_device_data(phba,
6005                                           &phba->luns, vport_wwpn,
6006                                           target_wwpn, lun);
6007         if (lun_info) {
6008                 lun_info->oas_enabled = false;
6009                 lun_info->priority = pri;
6010                 if (!lun_info->available)
6011                         lpfc_delete_device_data(phba, lun_info);
6012                 spin_unlock_irqrestore(&phba->devicelock, flags);
6013                 return true;
6014         }
6015
6016         spin_unlock_irqrestore(&phba->devicelock, flags);
6017         return false;
6018 }
6019
/*
 * Stub queuecommand entry point: always asks the midlayer to requeue.
 * Used by lpfc_template_nvme, which does not service SCSI commands.
 */
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}
6025
/*
 * Stub SCSI error handler: always reports FAILED.  Used for every
 * eh_*_handler slot of lpfc_template_nvme.
 */
static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}
6031
/*
 * Stub slave_alloc/slave_configure: rejects every device with -ENODEV
 * so no SCSI devices attach.  Used by lpfc_template_nvme.
 */
static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}
6037
/*
 * Host template with all SCSI entry points stubbed out (lpfc_no_*):
 * commands are bounced busy, error handlers fail, and no devices
 * attach.  Presumably selected when the port runs NVME only - the
 * name suggests so; verify at the template's registration site.
 */
struct scsi_host_template lpfc_template_nvme = {
	.module                 = THIS_MODULE,
	.name                   = LPFC_DRIVER_NAME,
	.proc_name              = LPFC_DRIVER_NAME,
	.info                   = lpfc_info,
	.queuecommand           = lpfc_no_command,
	.eh_abort_handler       = lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler   = lpfc_no_handler,
	.eh_host_reset_handler  = lpfc_no_handler,
	.slave_alloc            = lpfc_no_slave,
	.slave_configure        = lpfc_no_slave,
	.scan_finished          = lpfc_scan_finished,
	.this_id                = -1,
	.sg_tablesize           = 1,
	.cmd_per_lun            = 1,
	.use_clustering         = ENABLE_CLUSTERING,
	.shost_attrs            = lpfc_hba_attrs,
	.max_sectors            = 0xFFFF,
	.vendor_id              = LPFC_NL_VENDOR_ID,
	.track_queue_depth      = 0,
};
6061
/*
 * Fully functional SCSI host template, identical to lpfc_template
 * except that it provides no eh_host_reset_handler (hence "no_hr"),
 * so the midlayer cannot escalate to a host reset.
 */
struct scsi_host_template lpfc_template_no_hr = {
	.module                 = THIS_MODULE,
	.name                   = LPFC_DRIVER_NAME,
	.proc_name              = LPFC_DRIVER_NAME,
	.info                   = lpfc_info,
	.queuecommand           = lpfc_queuecommand,
	.eh_timed_out           = fc_eh_timed_out,
	.eh_abort_handler       = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler   = lpfc_bus_reset_handler,
	.slave_alloc            = lpfc_slave_alloc,
	.slave_configure        = lpfc_slave_configure,
	.slave_destroy          = lpfc_slave_destroy,
	.scan_finished          = lpfc_scan_finished,
	.this_id                = -1,
	.sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun            = LPFC_CMD_PER_LUN,
	.use_clustering         = ENABLE_CLUSTERING,
	.shost_attrs            = lpfc_hba_attrs,
	.max_sectors            = 0xFFFF,
	.vendor_id              = LPFC_NL_VENDOR_ID,
	.change_queue_depth     = scsi_change_queue_depth,
	.track_queue_depth      = 1,
};
6087
/*
 * Default SCSI host template for physical ports: full error-handler
 * escalation chain (abort -> device -> target -> bus -> host reset)
 * and the lpfc slave_* device lifecycle hooks.
 */
struct scsi_host_template lpfc_template = {
	.module                 = THIS_MODULE,
	.name                   = LPFC_DRIVER_NAME,
	.proc_name              = LPFC_DRIVER_NAME,
	.info                   = lpfc_info,
	.queuecommand           = lpfc_queuecommand,
	.eh_timed_out           = fc_eh_timed_out,
	.eh_abort_handler       = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler   = lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc            = lpfc_slave_alloc,
	.slave_configure        = lpfc_slave_configure,
	.slave_destroy          = lpfc_slave_destroy,
	.scan_finished          = lpfc_scan_finished,
	.this_id                = -1,
	.sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun            = LPFC_CMD_PER_LUN,
	.use_clustering         = ENABLE_CLUSTERING,
	.shost_attrs            = lpfc_hba_attrs,
	.max_sectors            = 0xFFFF,
	.vendor_id              = LPFC_NL_VENDOR_ID,
	.change_queue_depth     = scsi_change_queue_depth,
	.track_queue_depth      = 1,
};
6114
/*
 * SCSI host template for vports: like lpfc_template but with no bus or
 * host reset handlers (those act on the whole adapter, not a single
 * vport), vport-specific sysfs attributes, and no vendor_id.
 */
struct scsi_host_template lpfc_vport_template = {
	.module                 = THIS_MODULE,
	.name                   = LPFC_DRIVER_NAME,
	.proc_name              = LPFC_DRIVER_NAME,
	.info                   = lpfc_info,
	.queuecommand           = lpfc_queuecommand,
	.eh_timed_out           = fc_eh_timed_out,
	.eh_abort_handler       = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.slave_alloc            = lpfc_slave_alloc,
	.slave_configure        = lpfc_slave_configure,
	.slave_destroy          = lpfc_slave_destroy,
	.scan_finished          = lpfc_scan_finished,
	.this_id                = -1,
	.sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun            = LPFC_CMD_PER_LUN,
	.use_clustering         = ENABLE_CLUSTERING,
	.shost_attrs            = lpfc_vport_attrs,
	.max_sectors            = 0xFFFF,
	.change_queue_depth     = scsi_change_queue_depth,
	.track_queue_depth      = 1,
};